//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/sphinxplugin.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxint.h"
#include "sphinxutils.h"
#include "sphinxplugin.h"
#include "libutils.h"
//////////////////////////////////////////////////////////////////////////
// TYPES
//////////////////////////////////////////////////////////////////////////
/// loaded plugin library
class PluginLib_c final : public ISphRefcountedMT
{
protected:
CSphString m_sName;
void * m_pHandle; ///< handle from dlopen()
public:
int m_iHashedPlugins; ///< how many active g_hPlugins entries reference this handle
bool m_bDlGlobal = false;
PluginLib_c ( void * pHandle, const char * sName, bool bDlGlobal );
const CSphString & GetName() const { return m_sName; }
void * GetHandle() const { return m_pHandle; }
protected:
~PluginLib_c() final;
};
PluginLibRefPtr_c PluginDesc_c::GetLib() const
{
return m_pLib;
}
/// plugin key
struct PluginKey_t
{
PluginType_e m_eType { PLUGIN_FUNCTION };
CSphString m_sName;
PluginKey_t() = default;
PluginKey_t ( PluginType_e eType, const char * sName )
: m_eType ( eType )
, m_sName ( sName )
{
m_sName.ToLower();
}
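// hash combines CRC32 of the plugin type (as seed) with CRC32 of the lowercased name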
static int Hash ( const PluginKey_t & v )
{
return sphCRC32 ( v.m_sName.cstr(), v.m_sName.Length(),
sphCRC32 ( &v.m_eType, sizeof(v.m_eType) ) );
}
bool operator == ( const PluginKey_t & rhs ) const
{
return m_eType==rhs.m_eType && m_sName==rhs.m_sName;
}
};
//////////////////////////////////////////////////////////////////////////
// GLOBALS
//////////////////////////////////////////////////////////////////////////
const char * g_dPluginTypes[PLUGIN_TOTAL] = { "udf", "ranker", "index_token_filter", "query_token_filter" };
//////////////////////////////////////////////////////////////////////////
static bool g_bPluginsEnabled = true; ///< is there any plugin support at all?
static CSphString g_sPluginDir = HARDCODED_PLUGIN_DIR;
static CSphMutex g_tPluginMutex; ///< common plugin mutex (access to lib, func and ranker hashes)
static SmallStringHash_T<PluginLibRefPtr_c> g_hPluginLibs GUARDED_BY ( g_tPluginMutex ); ///< key is the filename (no path)
static CSphOrderedHash<PluginDescRefPtr_c, PluginKey_t, PluginKey_t, 256> g_hPlugins GUARDED_BY ( g_tPluginMutex );
//////////////////////////////////////////////////////////////////////////
// PLUGIN MANAGER
//////////////////////////////////////////////////////////////////////////
PluginLib_c::PluginLib_c ( void * pHandle, const char * sName, bool bDlGlobal )
{
assert ( pHandle );
m_pHandle = pHandle;
m_iHashedPlugins = 0;
m_sName = sName;
m_sName.ToLower();
m_bDlGlobal = bDlGlobal;
}
PluginLib_c::~PluginLib_c()
{
#if HAVE_DLOPEN
int iRes = dlclose ( m_pHandle );
sphLogDebug ( "dlclose(%s)=%d", m_sName.cstr(), iRes );
#endif
}
PluginDesc_c::PluginDesc_c ( PluginLibRefPtr_c pLib )
{
assert ( pLib );
m_pLib = std::move (pLib);
}
PluginDesc_c::~PluginDesc_c() {}
const CSphString & PluginDesc_c::GetLibName() const
{
assert ( m_pLib );
return m_pLib->GetName();
}
//////////////////////////////////////////////////////////////////////////
static bool SetPluginDir ( const char * sDir, CSphString & sPlugin )
{
bool bEmpty = ( !sDir || !*sDir );
if ( !bEmpty )
{
if ( sphDirExists ( sDir, nullptr ) )
{
sPlugin = sDir;
return true;
}
}
if ( sphDirExists ( HARDCODED_PLUGIN_DIR, nullptr ) )
{
sPlugin = HARDCODED_PLUGIN_DIR;
return true;
} else
{
#if _WIN32
CSphString sWinInstall = GetWinInstallDir();
if ( !sWinInstall.IsEmpty() )
{
CSphString sWinInstallPlugin;
sWinInstallPlugin.SetSprintf ( "%s/%s", sWinInstall.cstr(), HARDCODED_PLUGIN_DIR );
if ( sphDirExists ( sWinInstallPlugin, nullptr ) )
{
sPlugin = sWinInstallPlugin;
return true;
}
}
#endif
}
return false;
}
void sphPluginInit ( const char * sDir )
{
g_bPluginsEnabled = SetPluginDir ( sDir, g_sPluginDir );
}
const CSphString & PluginGetDir()
{
return g_sPluginDir;
}
bool sphPluginParseSpec ( const CSphString & sParams, StrVec_t & dParams, CSphString & sError )
{
dParams.Resize ( 0 );
sphSplit ( dParams, sParams.cstr(), ":" );
switch ( dParams.GetLength() )
{
case 0:
return true;
case 1:
sError = "filter name required in spec string; example: \"plugins.so:myfilter\"";
return false;
case 2:
dParams.Add ( "" );
return true;
case 3:
return true;
}
sError = "too many parts in spec string; must be in \"plugins.so:myfilter:options\" format";
return false;
}
struct SymbolDesc_t
{
int m_iOffsetOf; ///< pointer member location in the descriptor structure
const char * m_sPostfix; ///< symbol name postfix
bool m_bRequired; ///< whether this symbol must be present
};
#if HAVE_DLOPEN
static bool PluginLoadSymbols ( void * pDesc, const SymbolDesc_t * pSymbol, void * pHandle, const char * sName, CSphString & sError )
{
CSphString s;
while ( pSymbol->m_iOffsetOf>=0 )
{
s.SetSprintf ( pSymbol->m_sPostfix[0] ? "%s_%s" : "%s%s", sName, pSymbol->m_sPostfix );
auto ** ppFunc = (void**)((BYTE*)pDesc + pSymbol->m_iOffsetOf);
*ppFunc = dlsym ( pHandle, s.cstr() );
if ( !*ppFunc && pSymbol->m_bRequired )
{
sError.SetSprintf ( "symbol %s() not found", s.cstr() );
return false;
}
pSymbol++;
}
return true;
}
#endif // HAVE_DLOPEN
#if !_WIN32
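// fallback offsetof(): measure the member offset from a fake non-null address to
// avoid null-pointer arithmetic; only defined when the toolchain lacks offsetof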
#ifndef offsetof
#define offsetof(T, M) \
(reinterpret_cast<char*>(&(((T*)1000)->M)) - reinterpret_cast<char*>(1000))
#endif
#endif
#if HAVE_DLOPEN
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
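// per-type symbol tables consumed by PluginLoadSymbols(): each entry resolves
// "<plugin>_<postfix>" (bare "<plugin>" for the empty postfix), so a UDF named
// "myfunc" must export myfunc() and may export myfunc_init()/myfunc_deinit()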
static SymbolDesc_t g_dSymbolsUDF[] =
{
{ static_cast<int>( offsetof(PluginUDF_c, m_fnInit)), "init", false },
{ static_cast<int>( offsetof(PluginUDF_c, m_fnFunc)), "", true },
{ static_cast<int>( offsetof(PluginUDF_c, m_fnDeinit)), "deinit", false },
{ -1, nullptr, false }
};
static SymbolDesc_t g_dSymbolsRanker[] =
{
{ static_cast<int>( offsetof(PluginRanker_c, m_fnInit)), "init", false },
{ static_cast<int>( offsetof(PluginRanker_c, m_fnUpdate)), "update", false },
{ static_cast<int>( offsetof(PluginRanker_c, m_fnFinalize)), "finalize", true },
{ static_cast<int>( offsetof(PluginRanker_c, m_fnDeinit)), "deinit", false },
{ -1, nullptr, false }
};
static SymbolDesc_t g_dSymbolsTokenFilter[] =
{
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnInit)), "init", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnBeginDocument)), "begin_document", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnBeginField)), "begin_field", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnPushToken)), "push_token", true },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnGetExtraToken)), "get_extra_token", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnEndField)), "end_field", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnDeinit)), "deinit", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnTokenIsBlended)), "is_blended", false },
{ static_cast<int>( offsetof(PluginTokenFilter_c, m_fnTokenIsBlendedPart)), "is_blended_part", false },
{ -1, nullptr, false }
};
static SymbolDesc_t g_dSymbolsQueryTokenFilter[] =
{
{ static_cast<int>( offsetof(PluginQueryTokenFilter_c, m_fnInit)), "init", false },
{ static_cast<int>( offsetof(PluginQueryTokenFilter_c, m_fnPreMorph)), "pre_morph", false },
{ static_cast<int>( offsetof(PluginQueryTokenFilter_c, m_fnPostMorph)), "post_morph", false },
{ static_cast<int>( offsetof(PluginQueryTokenFilter_c, m_fnPushToken)), "push_token", false },
{ static_cast<int>( offsetof(PluginQueryTokenFilter_c, m_fnDeinit)), "deinit", false },
{ -1, nullptr, false }
};
void PluginLog ( const char * szMsg, int iLen )
{
if ( iLen<0 )
sphWarning ( "PLUGIN: %s", szMsg );
else
sphWarning ( "PLUGIN: %.*s", (int) iLen, szMsg );
}
static bool PluginOnLoadLibrary ( const PluginLibRefPtr_c& pLib, CSphString & sError ) REQUIRES ( g_tPluginMutex )
{
// library already loaded - no need to call plugin_load function
if ( g_hPluginLibs ( pLib->GetName() ) )
return true;
auto fnPluginLoad = (PluginLoad_fn) dlsym ( pLib->GetHandle(), "plugin_load" );
if ( fnPluginLoad )
{
char sErrBuf [ SPH_UDF_ERROR_LEN ] = { 0 };
if ( fnPluginLoad ( sErrBuf )!=0 )
{
sError = sErrBuf;
return false;
}
}
return true;
}
static bool PluginOnUnloadLibrary ( const PluginLibRefPtr_c& pLib, CSphString & sError )
{
auto fnPluginUnload = (PluginLoad_fn) dlsym ( pLib->GetHandle(), "plugin_unload" );
if ( fnPluginUnload )
{
char sErrBuf [ SPH_UDF_ERROR_LEN ] = { 0 };
if ( fnPluginUnload ( sErrBuf )!=0 )
{
sError = sErrBuf;
return false;
}
}
return true;
}
static PluginLibRefPtr_c LoadPluginLibrary ( const char * sLibName, CSphString & sError, bool bDlGlobal, bool bLinuxReload )
{
CSphString sTmpfile;
CSphString sLibfile;
sLibfile.SetSprintf ( "%s/%s", g_sPluginDir.cstr(), sLibName );
// dlopen() may return the cached library image even after the file on disk was
// updated; reload it via a temporary file name to force a fresh load
if ( bLinuxReload )
{
sTmpfile.SetSprintf ( "%s/%s.%u", g_sPluginDir.cstr(), sLibName, sphRand() );
if ( sph::rename ( sLibfile.cstr(), sTmpfile.cstr() ) )
{
sError.SetSprintf ( "failed to rename file (src=%s, dst=%s, errno=%d, error=%s)", sLibfile.cstr(), sTmpfile.cstr(), errno, strerrorm(errno) );
return nullptr;
}
}
int iFlags = ( bDlGlobal ? ( RTLD_LAZY | RTLD_GLOBAL ) : ( RTLD_LAZY | RTLD_LOCAL ) );
void * pHandle = dlopen ( bLinuxReload ? sTmpfile.cstr() : sLibfile.cstr(), iFlags );
if ( !pHandle )
{
const char * sDlerror = dlerror();
sError.SetSprintf ( "dlopen() failed: %s", sDlerror ? sDlerror : "(null)" );
return nullptr;
}
sphLogDebug ( "dlopen(%s)=%p", bLinuxReload ? sTmpfile.cstr() : sLibfile.cstr(), pHandle );
// rename file back to the original name
if ( bLinuxReload )
{
if ( sph::rename ( sTmpfile.cstr(), sLibfile.cstr() ) )
{
sError.SetSprintf ( "failed to rename file (src=%s, dst=%s, errno=%d, error=%s)", sTmpfile.cstr(), sLibfile.cstr(), errno, strerrorm(errno) );
dlclose ( pHandle );
return nullptr;
}
}
CSphString sBasename = sLibName;
const char * pDot = strchr ( sBasename.cstr(), '.' );
if ( pDot )
sBasename = sBasename.SubString ( 0, pDot-sBasename.cstr() );
CSphString sTmp;
auto fnVer = (PluginVer_fn) dlsym ( pHandle, sTmp.SetSprintf ( "%s_ver", sBasename.cstr() ).cstr() );
if ( !fnVer )
{
sError.SetSprintf ( "symbol '%s_ver' not found in '%s': update your UDF implementation", sBasename.cstr(), sLibName );
dlclose ( pHandle );
return nullptr;
}
if ( fnVer() < SPH_UDF_VERSION )
{
sError.SetSprintf ( "library '%s' was compiled using an older version of sphinxudf.h; it needs to be recompiled", sLibName );
dlclose ( pHandle );
return nullptr;
}
auto fnLogCb = (PluginLogCb_fn) dlsym ( pHandle, sTmp.SetSprintf ( "%s_setlogcb", sBasename.cstr ()).cstr ());
if ( fnLogCb ) {
fnLogCb(PluginLog);
}
return PluginLibRefPtr_c { new PluginLib_c ( pHandle, sLibName, bDlGlobal ) };
}
#endif
bool sphPluginCreate ( const char * szLib, PluginType_e eType, const char * sName, ESphAttr eUDFRetType, CSphString & sError )
{
return sphPluginCreate ( szLib, eType, sName, eUDFRetType, false, sError );
}
bool sphPluginCreate ( const char * szLib, PluginType_e eType, const char * sName, ESphAttr eUDFRetType, bool bDlGlobal, CSphString & sError )
{
#if !HAVE_DLOPEN
sError = "no dlopen(), no plugins";
return false;
#else
if ( !g_bPluginsEnabled )
{
sError = "plugin support disabled (requires a valid plugin_dir)";
return false;
}
// validate library name
for ( const char * p = szLib; *p; p++ )
if ( *p=='/' || *p=='\\' )
{
sError = "restricted character (path delimiter) in a library file name";
return false;
}
CSphString sLib = szLib;
sLib.ToLower();
// FIXME? preregister known rankers instead?
if ( eType==PLUGIN_RANKER )
{
for ( int i=0; i<SPH_RANK_TOTAL; i++ )
{
const char * r = sphGetRankerName ( ESphRankMode(i) );
if ( r && strcasecmp ( sName, r )==0 )
{
sError.SetSprintf ( "%s is a reserved ranker name", r );
return false;
}
}
}
// from here, we need a lock (we intend to update the plugin hash)
ScopedMutex_t tLock ( g_tPluginMutex );
// validate function name
PluginKey_t k ( eType, sName );
if ( g_hPlugins(k) )
{
sError.SetSprintf ( "plugin '%s' already exists", k.m_sName.cstr() );
return false;
}
// lookup or load library
PluginLibRefPtr_c pLib;
if ( g_hPluginLibs ( sLib ) )
pLib = g_hPluginLibs [ sLib ];
else
pLib = LoadPluginLibrary ( sLib.cstr(), sError, bDlGlobal, false );
if ( !pLib )
return false;
assert ( pLib->GetHandle() );
if ( !PluginOnLoadLibrary ( pLib, sError ) )
return false;
PluginDescRefPtr_c pPlugin;
const SymbolDesc_t * pSym = nullptr;
switch ( eType )
{
case PLUGIN_RANKER: pPlugin = new PluginRanker_c ( pLib ); pSym = g_dSymbolsRanker; break;
case PLUGIN_INDEX_TOKEN_FILTER: pPlugin = new PluginTokenFilter_c ( pLib ); pSym = g_dSymbolsTokenFilter; break;
case PLUGIN_QUERY_TOKEN_FILTER: pPlugin = new PluginQueryTokenFilter_c ( pLib ); pSym = g_dSymbolsQueryTokenFilter; break;
case PLUGIN_FUNCTION: pPlugin = new PluginUDF_c ( pLib, eUDFRetType ); pSym = g_dSymbolsUDF; break;
default:
sError.SetSprintf ( "INTERNAL ERROR: unknown plugin type %d in CreatePlugin()", (int)eType );
return false;
}
if ( !PluginLoadSymbols ( pPlugin, pSym, pLib->GetHandle(), k.m_sName.cstr(), sError ) )
{
sError.SetSprintf ( "%s in %s", sError.cstr(), sLib.cstr() );
return false;
}
// add library if needed
if ( !g_hPluginLibs ( sLib ) )
Verify ( g_hPluginLibs.Add ( pLib, pLib->GetName() ) );
// add function
Verify ( g_hPlugins.Add ( pPlugin, k ) );
++pPlugin->GetLib()->m_iHashedPlugins;
return true;
#endif // HAVE_DLOPEN
}
bool sphPluginDrop ( PluginType_e eType, const char * sName, CSphString & sError )
{
#if !HAVE_DLOPEN
sError = "no dlopen(), no plugins";
return false;
#else
ScopedMutex_t tLock ( g_tPluginMutex );
PluginKey_t tKey ( eType, sName );
PluginDescRefPtr_c* ppPlugin = g_hPlugins(tKey);
if ( !ppPlugin || !*ppPlugin )
{
sError.SetSprintf ( "plugin '%s' does not exist", sName );
return false;
}
PluginLibRefPtr_c pLib = ( *ppPlugin )->GetLib();
Verify ( g_hPlugins.Delete(tKey) );
bool bUnloaded = true;
if ( --pLib->m_iHashedPlugins==0 )
{
bUnloaded = PluginOnUnloadLibrary ( pLib, sError );
g_hPluginLibs.Delete ( pLib->GetName() );
}
return bUnloaded;
#endif // HAVE_DLOPEN
}
bool sphPluginReload ( const char * sName, CSphString & sError )
{
#if !HAVE_DLOPEN
sError = "no dlopen(), no plugins";
return false;
#else
// find all plugins from the given library
ScopedMutex_t tLock ( g_tPluginMutex );
CSphVector<PluginKey_t> dKeys;
CSphVector<PluginDescRefPtr_c> dPlugins;
bool bDlGlobal = false;
for ( const auto& tPlugin : g_hPlugins )
{
PluginDescRefPtr_c v = tPlugin.second;
if ( v->GetLibName()==sName )
{
dKeys.Add ( tPlugin.first );
dPlugins.Add ( v );
bDlGlobal = v->GetLib()->m_bDlGlobal;
}
}
// no plugins loaded? oops
if ( dPlugins.GetLength()==0 )
{
sError.SetSprintf ( "no active plugins loaded from %s", sName );
return false;
}
// load new library and check every plugin
PluginLibRefPtr_c pNewLib = LoadPluginLibrary ( sName, sError, bDlGlobal,
#if !_WIN32
true
#else
false
#endif
);
if ( !pNewLib )
return false;
// load all plugins
CSphVector<PluginDescRefPtr_c> dNewPlugins;
ARRAY_FOREACH ( i, dPlugins )
{
PluginDescRefPtr_c pDesc;
const SymbolDesc_t * pSym = nullptr;
switch ( dKeys[i].m_eType )
{
case PLUGIN_RANKER: pDesc = new PluginRanker_c ( pNewLib ); pSym = g_dSymbolsRanker; break;
case PLUGIN_INDEX_TOKEN_FILTER: pDesc = new PluginTokenFilter_c ( pNewLib ); pSym = g_dSymbolsTokenFilter; break;
case PLUGIN_QUERY_TOKEN_FILTER: pDesc = new PluginQueryTokenFilter_c ( pNewLib ); pSym = g_dSymbolsQueryTokenFilter; break;
case PLUGIN_FUNCTION: pDesc = new PluginUDF_c ( pNewLib, dPlugins[i]->GetUdfRetType() ); pSym = g_dSymbolsUDF; break;
default:
sphDie ( "INTERNAL ERROR: unknown plugin type %d in sphPluginReload()", (int)dKeys[i].m_eType );
return false;
}
if ( !PluginLoadSymbols ( pDesc, pSym, pNewLib->GetHandle(), dKeys[i].m_sName.cstr(), sError ) )
break;
dNewPlugins.Add ( pDesc );
}
// if there was a problem loading any of the plugins, time to fail
if ( dPlugins.GetLength()!=dNewPlugins.GetLength() )
{
sError.SetSprintf ( "failed to import plugin %s: %s", dKeys [ dNewPlugins.GetLength() ].m_sName.cstr(), sError.cstr() );
return false;
}
// unregister and release the old references
PluginLibRefPtr_c pOldLib = dPlugins[0]->GetLib();
ARRAY_FOREACH ( i, dPlugins )
{
assert ( dPlugins[i]->GetLib()==pOldLib );
Verify ( g_hPlugins.Delete ( dKeys[i] ) );
}
assert ( pOldLib->m_iHashedPlugins==dPlugins.GetLength() );
pOldLib->m_iHashedPlugins = 0;
PluginOnUnloadLibrary ( pOldLib, sError );
Verify ( g_hPluginLibs.Delete ( pOldLib->GetName() ) );
if ( !PluginOnLoadLibrary ( pNewLib, sError ) )
return false;
// register new references
g_hPluginLibs.Add ( pNewLib, pNewLib->GetName() );
ARRAY_FOREACH ( i, dNewPlugins )
Verify ( g_hPlugins.Add ( dNewPlugins[i], dKeys[i] ) );
assert ( pNewLib->m_iHashedPlugins==0 );
pNewLib->m_iHashedPlugins = dNewPlugins.GetLength();
sphLogDebug ( "reloaded %d plugins", dNewPlugins.GetLength() );
return true;
#endif // HAVE_DLOPEN
}
PluginDescRefPtr_c PluginAcquireDesc ( const char * szLib, PluginType_e eType, const char * szName, CSphString & sError )
{
PluginDescRefPtr_c pDesc = PluginGetDesc ( eType, szName );
if ( !pDesc )
{
if ( !sphPluginCreate ( szLib, eType, szName, SPH_ATTR_NONE, false, sError ) )
return nullptr;
return PluginGetDesc ( eType, szName );
}
CSphString sLib ( szLib );
sLib.ToLower();
if ( pDesc->GetLibName()==sLib )
return pDesc;
sError.SetSprintf ( "unable to load plugin '%s' from '%s': it has already been loaded from library '%s'",
szName, sLib.cstr(), pDesc->GetLibName().cstr() );
return nullptr;
}
static const char * UdfReturnType ( ESphAttr eType )
{
switch ( eType )
{
case SPH_ATTR_INTEGER: return "INT";
case SPH_ATTR_FLOAT: return "FLOAT";
case SPH_ATTR_STRINGPTR: return "STRING";
case SPH_ATTR_BIGINT: return "BIGINT";
default: assert ( 0 && "unknown UDF return type" ); return "???";
}
}
void sphPluginSaveState ( CSphWriter & tWriter )
{
ScopedMutex_t tLock ( g_tPluginMutex );
for ( const auto& tPlugin : g_hPlugins )
{
const PluginKey_t & k = tPlugin.first;
const PluginDescRefPtr_c v = tPlugin.second;
CSphString sBuf;
if ( k.m_eType==PLUGIN_FUNCTION )
sBuf.SetSprintf ( "CREATE FUNCTION %s RETURNS %s SONAME '%s';\n", k.m_sName.cstr(),
UdfReturnType ( ((PluginUDF_c*)v.Ptr())->m_eRetType ), v->GetLibName().cstr() );
else
sBuf.SetSprintf ( "CREATE PLUGIN %s TYPE '%s' SONAME '%s';\n",
k.m_sName.cstr(), g_dPluginTypes[k.m_eType], v->GetLibName().cstr() );
tWriter.PutBytes ( sBuf.cstr(), sBuf.Length() );
}
}
PluginType_e sphPluginGetType ( const CSphString & s )
{
if ( s=="ranker" ) return PLUGIN_RANKER;
if ( s=="index_token_filter" ) return PLUGIN_INDEX_TOKEN_FILTER;
if ( s=="query_token_filter" ) return PLUGIN_QUERY_TOKEN_FILTER;
return PLUGIN_TOTAL;
}
bool sphPluginExists ( PluginType_e eType, const char * sName )
{
if ( !g_bPluginsEnabled )
return false;
ScopedMutex_t tLock ( g_tPluginMutex );
PluginKey_t k ( eType, sName );
PluginDescRefPtr_c* pp = g_hPlugins(k);
return pp && *pp;
}
PluginDescRefPtr_c PluginGetDesc ( PluginType_e eType, const char * sName )
{
if ( !g_bPluginsEnabled )
return nullptr;
ScopedMutex_t tLock ( g_tPluginMutex );
PluginKey_t k ( eType, sName );
PluginDescRefPtr_c* pp = g_hPlugins(k);
if ( !pp || !*pp )
return nullptr;
return *pp;
}
void sphPluginList ( CSphVector<PluginInfo_t> & dResult )
{
if ( !g_bPluginsEnabled )
return;
ScopedMutex_t tLock ( g_tPluginMutex );
for ( const auto& tPlugin : g_hPlugins )
{
const PluginKey_t & k = tPlugin.first;
const PluginDescRefPtr_c v = tPlugin.second;
PluginInfo_t & p = dResult.Add();
p.m_eType = k.m_eType;
p.m_sName = k.m_sName;
p.m_sLib = v->GetLibName().cstr();
p.m_iUsers = v->GetRefcount() - 1; // except the one reference from the hash itself
if ( p.m_eType==PLUGIN_FUNCTION )
p.m_sExtra = UdfReturnType ( ((PluginUDF_c*)v.Ptr())->m_eRetType );
}
}
PluginUDF_c::PluginUDF_c ( PluginLibRefPtr_c pLib, ESphAttr eRetType )
: PluginDesc_c ( std::move ( pLib ) )
, m_eRetType ( eRetType )
{}
PluginRanker_c::PluginRanker_c ( PluginLibRefPtr_c pLib )
: PluginDesc_c ( std::move ( pLib ) )
{}
PluginTokenFilter_c::PluginTokenFilter_c ( PluginLibRefPtr_c pLib )
: PluginDesc_c ( std::move ( pLib ) )
{}
PluginQueryTokenFilter_c::PluginQueryTokenFilter_c ( PluginLibRefPtr_c pLib )
: PluginDesc_c ( std::move ( pLib ) )
{}

//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/compressed_http.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "compressed_http.h"
#if WITH_ZLIB
#include <zlib.h>
#endif
bool GzipDecompress ( const ByteBlob_t sIn, CSphVector<BYTE> & dRes, CSphString & sError )
{
#if WITH_ZLIB
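// MAX_WBITS|32 asks zlib to auto-detect zlib vs gzip framing (see inflateInit2())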
const int iWindowBits = MAX_WBITS | 32;
z_stream tInflate;
tInflate.zalloc = Z_NULL;
tInflate.zfree = Z_NULL;
tInflate.opaque = Z_NULL;
tInflate.avail_in = 0;
tInflate.next_in = Z_NULL;
if ( inflateInit2 ( &tInflate, iWindowBits )!=Z_OK )
{
sError = "gzip error: init failed";
return false;
}
tInflate.next_in = (z_const Bytef *)sIn.first;
tInflate.avail_in = sIn.second;
tInflate.avail_out = 0;
int iDecompressed = 0;
int iBufSize = Max ( 4096, sIn.second * 2 );
while ( tInflate.avail_out==0 )
{
dRes.Resize ( iDecompressed + iBufSize );
tInflate.next_out = dRes.Begin() + iDecompressed;
tInflate.avail_out = iBufSize;
int iRes = inflate ( &tInflate, Z_FINISH );
if ( iRes!=Z_STREAM_END && iRes!=Z_OK && iRes!=Z_BUF_ERROR )
{
sError.SetSprintf ( "gzip error: %s", tInflate.msg );
inflateEnd ( &tInflate );
return false;
}
iDecompressed += ( iBufSize - tInflate.avail_out );
}
inflateEnd ( &tInflate );
dRes.Resize ( iDecompressed );
return true;
#else
sError = "gzip error: unpack is not supported, rebuild with zlib";
return false;
#endif
}
bool HasGzip()
{
#if WITH_ZLIB
return true;
#else
return false;
#endif
}

//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/joinsorter.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "joinsorter.h"
#include "std/hash.h"
#include "std/openhash.h"
#include "sphinxquery.h"
#include "sphinxsort.h"
#include "sphinxjson.h"
#include "querycontext.h"
#include "docstore.h"
static int64_t g_iJoinCacheSize = 20971520;
void SetJoinCacheSize ( int64_t iSize )
{
g_iJoinCacheSize = iSize;
}
int64_t GetJoinCacheSize()
{
return g_iJoinCacheSize;
}
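// check whether an attribute/expression references the joined (right) table;
// optionally returns the expression with every "<joined_table>." prefix stripped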
static bool GetJoinAttrName ( const CSphString & sAttr, const CSphString & sJoinedIndex, CSphString * pModified = nullptr )
{
CSphString sPrefix;
sPrefix.SetSprintf ( "%s.", sJoinedIndex.cstr() );
int iPrefixLen = sPrefix.Length();
bool bRightTable = false;
CSphString sMod = sAttr;
const char * szStart = sMod.cstr();
while ( true )
{
const char * szFound = strstr ( sMod.cstr(), sPrefix.cstr() );
if ( !szFound )
break;
if ( szFound > szStart )
{
char c = *(szFound-1);
if ( ( c>='0' && c<='9' ) || ( c>='a' && c<='z' ) || ( c>='A' && c<='Z' ) || c=='_' )
continue;
}
bRightTable = true;
int iStart = szFound-sMod.cstr();
CSphString sNewExprPre = iStart > 0 ? sMod.SubString ( 0, iStart ) : "";
int iPostLen = sMod.Length()-iStart-iPrefixLen;
CSphString sNewExprPost = iPostLen > 0 ? sMod.SubString ( iStart + iPrefixLen, sMod.Length()-iStart-iPrefixLen ) : "";
sMod.SetSprintf ( "%s%s", sNewExprPre.cstr(), sNewExprPost.cstr() );
}
if ( bRightTable )
{
if ( pModified )
*pModified = sMod;
return true;
}
return false;
}
static StrVec_t ParseGroupBy ( const CSphString & sGroupBy )
{
StrVec_t dRes;
sphSplit ( dRes, sGroupBy.cstr(), ", \t\n" );
return dRes;
}
CSphVector<std::pair<int,bool>> FetchJoinRightTableFilters ( const CSphVector<CSphFilterSettings> & dFilters, const ISphSchema & tSchema, const char * szJoinedIndex )
{
CSphString sPrefix;
sPrefix.SetSprintf ( "%s.", szJoinedIndex );
CSphVector<std::pair<int,bool>> dRightFilters;
ARRAY_FOREACH ( i, dFilters )
{
const auto & tFilter = dFilters[i];
bool bHasPrefix = tFilter.m_sAttrName.Begins ( sPrefix.cstr() );
const CSphColumnInfo * pFilterAttr = tSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
if ( pFilterAttr )
{
if ( !pFilterAttr->IsJoined() )
continue;
}
else
{
if ( !bHasPrefix )
continue;
}
dRightFilters.Add ( {i,bHasPrefix} );
}
return dRightFilters;
}
bool NeedToMoveMixedJoinFilters ( const CSphQuery & tQuery, const ISphSchema & tSchema )
{
CSphVector<std::pair<int,bool>> dRightFilters = FetchJoinRightTableFilters ( tQuery.m_dFilters, tSchema, tQuery.m_sJoinIdx.cstr() );
if ( !dRightFilters.GetLength() )
return false;
// move all filters to the left query in case of LEFT JOIN
// otherwise we can't distinguish between 'no match' and 'match with null part from right table'
if ( tQuery.m_eJoinType==JoinType_e::LEFT )
return true;
if ( !tQuery.m_dFilterTree.GetLength() )
return false;
return dRightFilters.GetLength()!=tQuery.m_dFilters.GetLength();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// FIXME! maybe replace it with an LRU cache
class MatchCache_c
{
public:
MatchCache_c ( uint64_t uCacheSize );
~MatchCache_c();
void SetSchema ( const ISphSchema * pSchema );
bool Add ( uint64_t uHash, const CSphSwapVector<CSphMatch> & dMatches );
FORCE_INLINE bool Fetch ( uint64_t uHash, CSphSwapVector<CSphMatch> & dMatches );
private:
// a simplified match (incoming matches don't have a static part)
struct StoredMatch_t
{
CSphRowitem * m_pDynamic = nullptr;
};
using StoredMatches_t = CSphVector<StoredMatch_t>;
OpenHashTable_T<uint64_t, StoredMatches_t> m_hCache;
uint64_t m_uMaxSize = 0;
uint64_t m_uCurSize = 0;
std::unique_ptr<ISphSchema> m_pSchema;
uint64_t CalcMatchMem ( const CSphMatch & tMatch );
};
MatchCache_c::MatchCache_c ( uint64_t uCacheSize )
: m_uMaxSize ( uCacheSize )
{}
MatchCache_c::~MatchCache_c()
{
int64_t iIterator = 0;
std::pair<SphGroupKey_t, StoredMatches_t*> tRes;
while ( ( tRes = m_hCache.Iterate ( iIterator ) ).second )
{
StoredMatches_t & dMatches = *tRes.second;
for ( auto & i : dMatches )
{
CSphMatch tStub;
tStub.m_pDynamic = i.m_pDynamic;
m_pSchema->FreeDataPtrs(tStub);
tStub.ResetDynamic();
}
}
}
void MatchCache_c::SetSchema ( const ISphSchema * pSchema )
{
if ( m_pSchema )
return;
// keep a clone of the schema
// we assume that the schema won't change during the lifetime of the cache
m_pSchema = std::unique_ptr<ISphSchema> ( pSchema->CloneMe() );
}
uint64_t MatchCache_c::CalcMatchMem ( const CSphMatch & tMatch )
{
uint64_t uMem = 0;
for ( int i = 0; i < m_pSchema->GetAttrsCount(); i++ )
{
const auto & tAttr = m_pSchema->GetAttr(i);
if ( !tAttr.IsDataPtr() )
continue;
const BYTE * pBlob = (const BYTE *)sphGetRowAttr ( tMatch.m_pDynamic, tAttr.m_tLocator );
uMem += sphUnpackPtrAttr(pBlob).second;
}
uMem += m_pSchema->GetDynamicSize()*sizeof(CSphRowitem);
uMem += sizeof(StoredMatch_t);
return uMem;
}
bool MatchCache_c::Add ( uint64_t uHash, const CSphSwapVector<CSphMatch> & dMatches )
{
if ( !m_pSchema )
return false;
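// no eviction: once the size budget is spent, new result sets are not cached
// (the caller then keeps ownership of those matches and frees them itself)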
if ( m_uCurSize>=m_uMaxSize )
return false;
StoredMatches_t dStoredMatches;
for ( const auto & i : dMatches )
{
dStoredMatches.Add ( { i.m_pDynamic } );
m_uCurSize += CalcMatchMem(i);
}
m_hCache.Add ( uHash, dStoredMatches );
return true;
}
bool MatchCache_c::Fetch ( uint64_t uHash, CSphSwapVector<CSphMatch> & dMatches )
{
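// fetched matches borrow their dynamic rows straight from the cache entry;
// the caller resets the borrowed pointers afterwards so they aren't double-freed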
StoredMatches_t * pMatches = m_hCache.Find(uHash);
if ( !pMatches )
return false;
dMatches.Resize ( pMatches->GetLength() );
for ( int i = 0; i < pMatches->GetLength(); i++ )
dMatches[i].m_pDynamic = (*pMatches)[i].m_pDynamic;
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class MatchCalc_c : public MatchProcessor_i
{
public:
MatchCalc_c ( ISphMatchSorter * pSorter ) : m_pSorter ( pSorter ) {}
void Process ( CSphMatch * pMatch ) override { m_pSorter->Push ( *pMatch ); }
bool ProcessInRowIdOrder() const override { return false; }
void Process ( VecTraits_T<CSphMatch *> & dMatches ) override
{
for ( auto & i : dMatches )
Process(i);
}
protected:
ISphMatchSorter * m_pSorter = nullptr;
};
class MatchCalcGrouped_c : public MatchCalc_c
{
public:
MatchCalcGrouped_c ( ISphMatchSorter * pSorter ) : MatchCalc_c ( pSorter ) {}
void Process ( CSphMatch * pMatch ) final
{
m_pSorter->PushGrouped ( *pMatch, m_bFirst );
m_bFirst = false;
}
private:
bool m_bFirst = true;
};
class StoredFetch_c : public MatchProcessor_i
{
public:
StoredFetch_c ( const ISphSchema & tSorterSchema, const CSphIndex & tRightIndex, uint64_t uNullMask );
void Process ( CSphMatch * pMatch ) override;
bool ProcessInRowIdOrder() const override { return false; }
void Process ( VecTraits_T<CSphMatch *> & dMatches ) override;
bool HasFieldToFetch() const { return !m_dFieldToFetch.IsEmpty(); }
private:
const ISphSchema & m_tSorterSchema;
const CSphIndex & m_tRightIndex;
const CSphColumnInfo * m_pId = nullptr;
const CSphColumnInfo * m_pNullMaskAttr = nullptr;
std::unique_ptr<DocstoreSession_c> m_pSession;
int64_t m_iSessionUID = 0;
IntVec_t m_dFieldToFetch;
CSphVector<CSphAttrLocator> m_dAttrRemap;
uint64_t m_uNullMask = 0;
void CreateDocstoreSession();
void SetupAttrRemap();
};
StoredFetch_c::StoredFetch_c ( const ISphSchema & tSorterSchema, const CSphIndex & tRightIndex, uint64_t uNullMask )
: m_tSorterSchema ( tSorterSchema )
, m_tRightIndex ( tRightIndex )
, m_uNullMask ( uNullMask )
{
CreateDocstoreSession();
SetupAttrRemap();
}
void StoredFetch_c::Process ( CSphMatch * pMatch )
{
assert ( !m_dFieldToFetch.IsEmpty() );
assert(m_pId);
SphAttr_t tDocId = pMatch->GetAttr ( m_pId->m_tLocator );
// compare against a preset null mask that the join sorter uses
bool bNull = m_pNullMaskAttr && pMatch->GetAttr ( m_pNullMaskAttr->m_tLocator )==m_uNullMask;
DocstoreDoc_t tDoc;
if ( !bNull && m_tRightIndex.GetDoc ( tDoc, tDocId, &m_dFieldToFetch, m_iSessionUID, true ) )
{
ARRAY_FOREACH ( i, tDoc.m_dFields )
{
const CSphAttrLocator & tLoc = m_dAttrRemap[i];
auto pPrev = (BYTE*)pMatch->GetAttr(tLoc);
SafeDeleteArray(pPrev);
pMatch->SetAttr ( tLoc, (SphAttr_t)tDoc.m_dFields[i].LeakData() );
}
}
else
{
ARRAY_FOREACH ( i, tDoc.m_dFields )
{
auto pPrev = (BYTE*)pMatch->GetAttr ( m_dAttrRemap[i] );
SafeDeleteArray(pPrev);
}
}
}
void StoredFetch_c::Process ( VecTraits_T<CSphMatch *> & dMatches )
{
for ( auto & i : dMatches )
Process(i);
}
void StoredFetch_c::CreateDocstoreSession()
{
m_pSession = std::make_unique<DocstoreSession_c>();
m_iSessionUID = m_pSession->GetUID();
// spawn buffered readers for the current session
m_tRightIndex.CreateReader(m_iSessionUID);
}
void StoredFetch_c::SetupAttrRemap()
{
CSphString sAttrName;
sAttrName.SetSprintf ( "%s.%s", m_tRightIndex.GetName(), sphGetDocidName() );
m_pId = m_tSorterSchema.GetAttr ( sAttrName.cstr() );
assert(m_pId);
const ISphSchema & tRightSchema = m_tRightIndex.GetMatchSchema();
for ( int i = 0; i < tRightSchema.GetFieldsCount(); i++ )
{
const CSphColumnInfo & tField = tRightSchema.GetField(i);
if ( !( tField.m_uFieldFlags & CSphColumnInfo::FIELD_STORED ) )
continue;
CSphString sAttrName;
sAttrName.SetSprintf ( "%s.%s", m_tRightIndex.GetName(), tField.m_sName.cstr() );
int iDocStoreFieldId = m_tRightIndex.GetFieldId ( tField.m_sName, DOCSTORE_TEXT );
int iAttrId = m_tSorterSchema.GetAttrIndex ( sAttrName.cstr() );
assert ( iDocStoreFieldId!=-1 && iAttrId!=-1 );
m_dFieldToFetch.Add(iDocStoreFieldId);
m_dAttrRemap.Add ( m_tSorterSchema.GetAttr(iAttrId).m_tLocator );
}
m_pNullMaskAttr = m_tSorterSchema.GetAttr ( GetNullMaskAttrName() );
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class FilterEval_c
{
public:
void SetFilter ( std::unique_ptr<ISphFilter> & pFilter ) { m_pFilter = std::move ( pFilter ); }
void SetBlobPool ( const BYTE * pBlobPool ) { if ( m_pFilter ) m_pFilter->SetBlobStorage(pBlobPool); }
void SetColumnar ( columnar::Columnar_i * pColumnar ) { if ( m_pFilter ) m_pFilter->SetColumnar(pColumnar); }
FORCE_INLINE bool Eval ( const CSphMatch & tMatch ) const { return m_pFilter ? m_pFilter->Eval(tMatch) : true; }
private:
std::unique_ptr<ISphFilter> m_pFilter;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class JoinSorter_c : public ISphMatchSorter
{
public:
JoinSorter_c ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const CSphQuery & tQuery, ISphMatchSorter * pSorter, bool bJoinedGroupSort );
JoinSorter_c ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const VecTraits_T<const CSphQuery> & dQueries, ISphMatchSorter * pSorter, bool bJoinedGroupSort );
bool IsGroupby() const override { return m_pSorter->IsGroupby(); }
void SetState ( const CSphMatchComparatorState & tState ) override { m_pSorter->SetState(tState); }
const CSphMatchComparatorState & GetState() const override { return m_pSorter->GetState(); }
void SetGroupState ( const CSphMatchComparatorState & tState ) override { m_pSorter->SetGroupState(tState); }
void SetBlobPool ( const BYTE * pBlobPool ) override;
void SetColumnar ( columnar::Columnar_i * pColumnar ) override;
void SetSchema ( ISphSchema * pSchema, bool bRemapCmp ) override;
const ISphSchema * GetSchema() const override { return m_pSorter->GetSchema(); }
bool Push ( const CSphMatch & tEntry ) override { return Push_T ( tEntry, [this]( const CSphMatch & tMatch ){ return m_pSorter->Push(tMatch); } ); }
void Push ( const VecTraits_T<const CSphMatch> & dMatches ) override;
bool PushGrouped ( const CSphMatch & tEntry, bool bNewSet ) override { return Push_T ( tEntry, [this,bNewSet]( const CSphMatch & tMatch ){ return m_pSorter->PushGrouped ( tMatch, bNewSet ); } ); }
int GetLength() override { return m_pSorter->GetLength(); }
int64_t GetTotalCount() const override { return m_pSorter->GetTotalCount(); }
void Finalize ( MatchProcessor_i & tProcessor, bool bCallProcessInResultSetOrder, bool bFinalizeMatches ) override { m_pSorter->Finalize ( tProcessor, bCallProcessInResultSetOrder, bFinalizeMatches ); }
int Flatten ( CSphMatch * pTo ) override { return m_pSorter->Flatten(pTo); }
const CSphMatch * GetWorst() const override { return m_pSorter->GetWorst(); }
bool CanBeCloned() const override { return m_pSorter->CanBeCloned(); }
ISphMatchSorter * Clone() const override;
void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) override { m_pSorter->MoveTo ( ((JoinSorter_c *)pRhs)->m_pSorter.get(), bCopyMeta ); }
void CloneTo ( ISphMatchSorter * pTrg ) const override { m_pSorter->CloneTo(pTrg); }
void SetFilteredAttrs ( const sph::StringSet & hAttrs, bool bAddDocid ) override { m_pSorter->SetFilteredAttrs(hAttrs, bAddDocid); }
void TransformPooled2StandalonePtrs ( GetBlobPoolFromMatch_fn fnBlobPoolFromMatch, GetColumnarFromMatch_fn fnGetColumnarFromMatch, bool bFinalizeSorters ) override { m_pSorter->TransformPooled2StandalonePtrs(fnBlobPoolFromMatch, fnGetColumnarFromMatch, bFinalizeSorters); }
void SetRandom ( bool bRandom ) override { m_pSorter->SetRandom(bRandom); }
bool IsRandom() const override { return m_pSorter->IsRandom(); }
int GetMatchCapacity() const override { return m_pSorter->GetMatchCapacity(); }
RowTagged_t GetJustPushed() const override { return m_pSorter->GetJustPushed(); }
VecTraits_T<RowTagged_t> GetJustPopped() const override { return m_pSorter->GetJustPopped(); }
bool IsCutoffDisabled() const override { return m_pSorter->IsCutoffDisabled(); }
void SetMerge ( bool bMerge ) override { m_pSorter->SetMerge(bMerge); }
bool IsPrecalc() const override { return false; }
bool IsJoin() const override { return true; }
bool FinalizeJoin ( CSphString & sError, CSphString & sWarning ) override;
bool GetErrorFlag() const { return m_bErrorFlag; }
const CSphString & GetErrorMessage() const { return m_sErrorMessage; }
protected:
template <typename PUSH> FORCE_INLINE bool Push_T ( const CSphMatch & tMatch, PUSH && fnPush );
template <typename PUSH> FORCE_INLINE bool PushJoinedMatches ( const CSphMatch & tEntry, PUSH && fnPush );
template <typename PUSH> FORCE_INLINE bool PushLeftMatch ( const CSphMatch & tEntry, PUSH && fnPush );
private:
struct JoinAttrNameRemap_t
{
CSphString m_sFrom;
StrVec_t m_dTo;
};
struct JoinAttrRemap_t
{
CSphAttrLocator m_tLocSrc;
CSphAttrLocator m_tLocDst;
bool m_bJsonRepack = false;
};
struct FilterRemap_t
{
int m_iFilterId = -1;
CSphAttrLocator m_tLocator;
bool m_bBlob = false;
};
CSphQuery m_tJoinQuery;
std::unique_ptr<QueryParser_i> m_pJoinQueryParser;
const CSphIndex * m_pIndex = nullptr;
const CSphIndex * m_pJoinedIndex = nullptr;
const CSphQuery & m_tQuery;
VecTraits_T<const CSphQuery> m_dQueries;
FilterEval_c m_tMixedFilter;
CSphMatch m_tMatch;
std::unique_ptr<ISphMatchSorter> m_pSorter;
std::unique_ptr<ISphMatchSorter> m_pOriginalSorter;
std::unique_ptr<ISphMatchSorter> m_pRightSorter;
std::unique_ptr<ISphSchema> m_pRightSorterRsetSchema;
const BYTE * m_pBlobPool = nullptr;
const CSphColumnInfo * m_pAttrNullBitmask = nullptr;
CSphSwapVector<CSphMatch> m_dMatches;
CSphVector<JoinAttrNameRemap_t> m_dAttrRemap;
CSphVector<JoinAttrRemap_t> m_dJoinRemap;
bool m_bNeedToSetupRemap = true;
CSphVector<FilterRemap_t> m_dFilterRemap;
int m_iDynamicSize = 0;
bool m_bFinalCalcOnly = false;
const ISphSchema * m_pSorterSchema = nullptr;
CSphVector<ContextCalcItem_t> m_dCalcPrefilter;
CSphVector<ContextCalcItem_t> m_dCalcPresort;
CSphVector<ContextCalcItem_t> m_dAggregates;
MatchCache_c m_tCache;
bool m_bCacheOk = true;
std::unique_ptr<BYTE[]> m_pNullMask;
uint64_t m_uNullMask = 0;
bool m_bErrorFlag = false;
CSphString m_sErrorMessage;
bool SetupJoinQuery ( int iDynamicSize, CSphString & sError );
bool SetupJoinSorter ( CSphString & sError );
void SetupJoinAttrRemap();
void SetupDependentAttrCalc ( const IntVec_t & dJoinedAttrs );
void SetupSorterSchema();
void SetupNullMask();
void SetupAggregates();
FORCE_INLINE uint64_t SetupJoinFilters ( const CSphMatch & tEntry );
bool SetupRightFilters ( CSphString & sError );
bool SetupOnFilters ( CSphString & sError );
void AddToAttrRemap ( const CSphString & sFrom, const CSphString & sTo );
void AddToJoinSelectList ( const CSphString & sExpr );
void AddToJoinSelectList ( const CSphString & sExpr, const CSphString & sAlias );
void AddToJoinSelectList ( const CSphString & sExpr, const CSphString & sAlias, const char * szRemapPrefix );
void AddToJoinSelectList ( const CSphString & sExpr, const CSphString & sAlias, int iSorterAttrId, bool bConvertJsonType=false );
void AddOnFilterToFilterTree ( int iFilterId );
void AddStarItemsToJoinSelectList();
void AddQueryItemsToJoinSelectList();
void AddGroupbyItemsToJoinSelectList();
void AddRemappedStringItemsToJoinSelectList();
void AddExpressionItemsToJoinSelectList();
void AddDocidToJoinSelectList();
void SetupJoinSelectList();
void RepackJsonFieldAsStr ( const CSphMatch & tSrcMatch, const CSphAttrLocator & tLocSrc, const CSphAttrLocator & tLocDst );
void ProduceCacheSizeWarning ( CSphString & sWarning );
void PopulateStoredFields();
};
JoinSorter_c::JoinSorter_c ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const CSphQuery & tQuery, ISphMatchSorter * pSorter, bool bJoinedGroupSort )
: JoinSorter_c ( pIndex, pJoinedIndex, { &tQuery, 1 }, pSorter, bJoinedGroupSort )
{}
JoinSorter_c::JoinSorter_c ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const VecTraits_T<const CSphQuery> & dQueries, ISphMatchSorter * pSorter, bool bJoinedGroupSort )
: m_pIndex ( pIndex )
, m_pJoinedIndex ( pJoinedIndex )
, m_tQuery ( dQueries.First() )
, m_dQueries ( dQueries )
, m_pSorter ( pSorter )
, m_tCache ( GetJoinCacheSize() )
{
assert ( pIndex && pJoinedIndex && pSorter );
const ISphSchema & tSorterSchema = *m_pSorter->GetSchema();
bool bHaveAggregates = false;
for ( int i = 0; i < tSorterSchema.GetAttrsCount(); i++ )
bHaveAggregates |= tSorterSchema.GetAttr(i).m_eAggrFunc!=SPH_AGGR_NONE;
CSphVector<std::pair<int,bool>> dRightFilters = FetchJoinRightTableFilters ( m_tQuery.m_dFilters, tSorterSchema, m_tQuery.m_sJoinIdx.cstr() );
bool bDisableByImplicitGrouping = HasImplicitGrouping(m_tQuery) && m_tQuery.m_eJoinType!=JoinType_e::LEFT;
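// when no grouping, aggregates, right-table filters or precalc depend on joined
// attributes at push time, skip the per-push join and run it once in FinalizeJoin()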
m_bFinalCalcOnly = !bJoinedGroupSort && !bHaveAggregates && !dRightFilters.GetLength() && !NeedToMoveMixedJoinFilters ( m_tQuery, tSorterSchema ) && !pSorter->IsPrecalc() && !bDisableByImplicitGrouping;
m_bErrorFlag = !SetupJoinQuery ( m_pSorter->GetSchema()->GetDynamicSize(), m_sErrorMessage );
}
void JoinSorter_c::SetBlobPool ( const BYTE * pBlobPool )
{
m_pBlobPool = pBlobPool;
m_pSorter->SetBlobPool(pBlobPool);
m_tMixedFilter.SetBlobPool(pBlobPool);
}
void JoinSorter_c::SetColumnar ( columnar::Columnar_i * pColumnar )
{
m_pSorter->SetColumnar(pColumnar);
m_tMixedFilter.SetColumnar(pColumnar);
}
void JoinSorter_c::SetSchema ( ISphSchema * pSchema, bool bRemapCmp )
{
m_pSorter->SetSchema ( pSchema, bRemapCmp );
m_bErrorFlag = !SetupJoinQuery ( pSchema->GetDynamicSize(), m_sErrorMessage );
}
void JoinSorter_c::SetupSorterSchema()
{
m_pSorterSchema = m_pSorter->GetSchema();
assert ( m_pSorterSchema );
m_pAttrNullBitmask = m_pSorterSchema->GetAttr ( GetNullMaskAttrName() );
}
void JoinSorter_c::SetupNullMask()
{
if ( !m_pAttrNullBitmask )
return;
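// a string-typed null mask means the flags can't fit into a single 64-bit attr,
// so the bit vector is packed into a blob instead of a plain bitmask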
if ( m_pAttrNullBitmask->m_eAttrType==SPH_ATTR_STRINGPTR )
{
int iNumJoinAttrs = 0;
int iDynamic = 0;
for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
{
const auto & tAttr = m_pSorterSchema->GetAttr(i);
if ( !tAttr.m_tLocator.m_bDynamic )
continue;
iDynamic++;
if ( tAttr.IsJoined() )
iNumJoinAttrs = Max ( iNumJoinAttrs, iDynamic );
}
BitVec_T<BYTE> tMask(iNumJoinAttrs);
iDynamic = 0;
for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
{
const auto & tAttr = m_pSorterSchema->GetAttr(i);
if ( !tAttr.m_tLocator.m_bDynamic )
continue;
if ( tAttr.IsJoined() )
tMask.BitSet(iDynamic);
iDynamic++;
}
m_pNullMask = std::unique_ptr<BYTE[]>( sphPackPtrAttr ( { tMask.Begin(), tMask.GetSizeBytes() } ) );
m_uNullMask = (uint64_t)m_pNullMask.get();
return;
}
// we keep null flags only for attributes with a dynamic locator
// and these attributes need to be from the right table
m_uNullMask = 0;
int iDynamic = 0;
for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
{
const auto & tAttr = m_pSorterSchema->GetAttr(i);
if ( !tAttr.m_tLocator.m_bDynamic )
continue;
if ( tAttr.IsJoined() )
m_uNullMask |= 1ULL << iDynamic;
iDynamic++;
}
}
void JoinSorter_c::SetupAggregates()
{
for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
{
const auto & tAttr = m_pSorterSchema->GetAttr(i);
if ( tAttr.m_eAggrFunc!=SPH_AGGR_NONE && tAttr.m_eStage==SPH_EVAL_SORTER && GetJoinAttrName ( tAttr.m_sName, CSphString ( m_pJoinedIndex->GetName() ) ) )
m_dAggregates.Add ( { tAttr.m_tLocator, tAttr.m_eAttrType, tAttr.m_pExpr } );
}
}
bool JoinSorter_c::SetupJoinQuery ( int iDynamicSize, CSphString & sError )
{
m_pJoinQueryParser = std::unique_ptr<QueryParser_i>( m_tQuery.m_pQueryParser->Clone() );
m_tJoinQuery.m_pQueryParser = m_pJoinQueryParser.get();
m_tJoinQuery.m_eQueryType = m_tQuery.m_eQueryType;
m_tJoinQuery.m_iLimit = DEFAULT_MAX_MATCHES;
m_tJoinQuery.m_iCutoff = 0;
m_tJoinQuery.m_sQuery = m_tJoinQuery.m_sRawQuery = m_tQuery.m_sJoinQuery;
m_tMatch.Reset ( iDynamicSize );
SetupSorterSchema();
SetupJoinSelectList();
if ( !SetupRightFilters(sError) ) return false;
if ( !SetupOnFilters(sError) ) return false;
if ( !SetupJoinSorter(sError) ) return false;
SetupNullMask();
SetupAggregates();
m_iDynamicSize = iDynamicSize;
return true;
}
bool JoinSorter_c::SetupJoinSorter ( CSphString & sError )
{
SphQueueSettings_t tQueueSettings ( m_pJoinedIndex->GetMatchSchema() );
tQueueSettings.m_bComputeItems = true;
SphQueueRes_t tRes;
m_pRightSorter = std::unique_ptr<ISphMatchSorter> ( sphCreateQueue ( tQueueSettings, m_tJoinQuery, sError, tRes ) );
if ( !m_pRightSorter )
return false;
m_pRightSorterRsetSchema = std::unique_ptr<ISphSchema> ( m_pRightSorter->GetSchema()->CloneMe() );
assert(m_pRightSorterRsetSchema);
return true;
}
void JoinSorter_c::SetupDependentAttrCalc ( const IntVec_t & dJoinedAttrs )
{
// find expressions that depend on joined attrs
for ( int i = 0; i < m_pSorterSchema->GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_pSorterSchema->GetAttr(i);
if ( !tAttr.m_pExpr )
continue;
StrVec_t dDeps;
dDeps.Add ( tAttr.m_sName );
FetchAttrDependencies ( dDeps, *m_pSorterSchema );
bool bFound = false;
for ( auto iJoinedAttr : dJoinedAttrs )
for ( const auto & sDep : dDeps )
bFound |= m_pSorterSchema->GetAttr(iJoinedAttr).m_sName==sDep;
if ( !bFound )
continue;
switch ( m_pSorterSchema->GetAttr(i).m_eStage )
{
case SPH_EVAL_PREFILTER:
m_dCalcPrefilter.Add ( { tAttr.m_tLocator, tAttr.m_eAttrType, tAttr.m_pExpr } );
break;
case SPH_EVAL_PRESORT:
m_dCalcPresort.Add ( { tAttr.m_tLocator, tAttr.m_eAttrType, tAttr.m_pExpr } );
break;
default:
break;
}
}
}
void JoinSorter_c::SetupJoinAttrRemap()
{
m_dJoinRemap.Resize(0);
IntVec_t dJoinedAttrs;
auto * pSorterSchema = m_pSorter->GetSchema();
auto * pJoinSorterSchema = m_pRightSorter->GetSchema();
for ( int i = 0; i < pJoinSorterSchema->GetAttrsCount(); i++ )
{
auto & tAttrSrc = pJoinSorterSchema->GetAttr(i);
const JoinAttrNameRemap_t * pFound = nullptr;
for ( const auto & tRemap : m_dAttrRemap )
if ( tRemap.m_sFrom==tAttrSrc.m_sName )
{
pFound = &tRemap;
break;
}
if ( !pFound )
continue;
for ( const auto & sDstAttr : pFound->m_dTo )
{
int iDstAttr = pSorterSchema->GetAttrIndex ( sDstAttr.cstr() );
assert ( iDstAttr>=0 );
dJoinedAttrs.Add(iDstAttr);
const CSphColumnInfo & tAttrDst = pSorterSchema->GetAttr(iDstAttr);
bool bJsonRepack = tAttrDst.m_sName.Begins ( GetInternalJsonPrefix() ) || tAttrDst.m_sName.Begins ( GetInternalAttrPrefix() );
m_dJoinRemap.Add ( { tAttrSrc.m_tLocator, tAttrDst.m_tLocator, bJsonRepack } );
}
}
SetupDependentAttrCalc(dJoinedAttrs);
m_bNeedToSetupRemap = false;
}
FORCE_INLINE void SetExprBlobPool ( const CSphVector<ContextCalcItem_t> & dItems, const BYTE * pBlobPool )
{
for ( const auto & i : dItems )
i.m_pExpr->Command ( SPH_EXPR_SET_BLOB_POOL, (void*)pBlobPool );
}
template <typename PUSH>
bool JoinSorter_c::PushJoinedMatches ( const CSphMatch & tEntry, PUSH && fnPush )
{
SetExprBlobPool ( m_dCalcPrefilter, m_pBlobPool );
SetExprBlobPool ( m_dCalcPresort, m_pBlobPool );
SetExprBlobPool ( m_dAggregates, m_pBlobPool );
bool bAnythingPushed = false;
ARRAY_FOREACH ( iMatch, m_dMatches )
{
memcpy ( m_tMatch.m_pDynamic, tEntry.m_pDynamic, m_iDynamicSize*sizeof(CSphRowitem) );
auto & tMatchFromRset = m_dMatches[iMatch];
for ( auto & i : m_dJoinRemap )
{
if ( i.m_bJsonRepack )
RepackJsonFieldAsStr ( tMatchFromRset, i.m_tLocSrc, i.m_tLocDst );
else
m_tMatch.SetAttr ( i.m_tLocDst, tMatchFromRset.GetAttr ( i.m_tLocSrc ) );
}
CalcContextItems ( m_tMatch, m_dCalcPrefilter );
if ( !m_tMixedFilter.Eval(m_tMatch) )
continue;
CalcContextItems ( m_tMatch, m_dCalcPresort );
CalcContextItems ( m_tMatch, m_dAggregates );
bAnythingPushed |= fnPush(m_tMatch);
// clear repacked json
for ( auto & i : m_dJoinRemap )
if ( i.m_bJsonRepack )
{
auto pValue = (BYTE *)m_tMatch.GetAttr(i.m_tLocDst);
SafeDeleteArray(pValue);
}
}
return bAnythingPushed;
}
template <typename PUSH>
bool JoinSorter_c::PushLeftMatch ( const CSphMatch & tEntry, PUSH && fnPush )
{
// if the query has a MATCH() against the right table, never emit a left match
// with NULL values for the right table
if ( !m_tQuery.m_sJoinQuery.IsEmpty() )
return false;
memcpy ( m_tMatch.m_pDynamic, tEntry.m_pDynamic, m_iDynamicSize*sizeof(CSphRowitem) );
// set NULL bitmask
assert(m_pAttrNullBitmask);
m_tMatch.SetAttr ( m_pAttrNullBitmask->m_tLocator, m_uNullMask );
if ( !m_tMixedFilter.Eval(m_tMatch) )
return false;
CalcContextItems ( m_tMatch, m_dAggregates );
return fnPush(m_tMatch);
}
void JoinSorter_c::RepackJsonFieldAsStr ( const CSphMatch & tSrcMatch, const CSphAttrLocator & tLocSrc, const CSphAttrLocator & tLocDst )
{
auto pValue = (BYTE *)m_tMatch.GetAttr(tLocDst);
SafeDeleteArray(pValue);
SphAttr_t tJsonFieldPtr = tSrcMatch.GetAttr(tLocSrc);
if ( !tJsonFieldPtr )
return;
const BYTE * pVal = (BYTE *)tJsonFieldPtr;
auto tBlob = sphUnpackPtrAttr(pVal);
pVal = tBlob.first;
ESphJsonType eJson = (ESphJsonType)*pVal++;
CSphString sResult = FormatJsonAsSortStr ( pVal, eJson );
int iStrLen = sResult.Length();
BYTE * pData = nullptr;
SphAttr_t uValue = (SphAttr_t) sphPackPtrAttr ( iStrLen+1, &pData );
memcpy ( pData, sResult.cstr(), iStrLen+1 );
m_tMatch.SetAttr ( tLocDst, uValue );
}
template <typename PUSH>
bool JoinSorter_c::Push_T ( const CSphMatch & tEntry, PUSH && fnPush )
{
if ( m_bFinalCalcOnly )
return fnPush(tEntry);
if ( m_bErrorFlag )
return false;
bool bInCache = true;
uint64_t uJoinOnFilterHash = SetupJoinFilters(tEntry);
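// left matches with identical ON-clause values hash the same, so the cached
// right-table result set can be reused instead of re-running the query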
if ( !m_tCache.Fetch ( uJoinOnFilterHash, m_dMatches ) )
{
CSphQueryResultMeta tMeta;
CSphQueryResult tQueryResult;
tQueryResult.m_pMeta = &tMeta;
// restore non-standalone schema
// FIXME!!!! make a SetSchema that does not take ownership of the schema
m_pRightSorter->SetSchema ( m_pRightSorterRsetSchema->CloneMe(), true );
CSphMultiQueryArgs tArgs(1);
ISphMatchSorter * pSorter = m_pRightSorter.get();
if ( !m_pJoinedIndex->MultiQuery ( tQueryResult, m_tJoinQuery, { &pSorter, 1 }, tArgs ) )
{
m_bErrorFlag = true;
m_sErrorMessage.SetSprintf ( "joined table %s: %s", m_pJoinedIndex->GetName(), tMeta.m_sError.cstr() );
return false;
}
m_dMatches.Resize(0);
// setup join attr remap, but do it only once
// we can't do that before because we need to remap from the standalone schema and we get it only after the first query
if ( m_bNeedToSetupRemap )
SetupJoinAttrRemap();
if ( pSorter->GetLength() )
{
int iCopied = pSorter->Flatten ( m_dMatches.AddN ( pSorter->GetLength() ) );
m_dMatches.Resize(iCopied);
}
m_tCache.SetSchema ( pSorter->GetSchema() );
bInCache = m_tCache.Add ( uJoinOnFilterHash, m_dMatches );
m_bCacheOk &= bInCache;
}
auto tScopedReset = AtScopeExit ( [this, bInCache]
{
if ( bInCache )
{
for ( auto & i : m_dMatches )
i.m_pDynamic = nullptr;
}
else
{
const ISphSchema * pTransformedRightSchema = m_pRightSorter->GetSchema();
for ( auto & i : m_dMatches )
{
pTransformedRightSchema->FreeDataPtrs(i);
i.ResetDynamic();
}
}
} );
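// copy the incoming match's static part while keeping our own dynamic row storage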
CSphRowitem * pDynamic = m_tMatch.m_pDynamic;
memcpy ( &m_tMatch, &tEntry, sizeof(m_tMatch) );
m_tMatch.m_pDynamic = pDynamic;
bool bAnythingPushed = PushJoinedMatches ( tEntry, fnPush );
if ( !m_dMatches.GetLength() && m_tQuery.m_eJoinType==JoinType_e::LEFT )
return PushLeftMatch ( tEntry, fnPush );
return bAnythingPushed;
}
void JoinSorter_c::Push ( const VecTraits_T<const CSphMatch> & dMatches )
{
for ( auto & i : dMatches )
Push(i);
}
void JoinSorter_c::ProduceCacheSizeWarning ( CSphString & sWarning )
{
if ( !m_bCacheOk )
sWarning.SetSprintf ( "Join cache overflow detected; increase join_cache_size to improve performance" );
}
void JoinSorter_c::PopulateStoredFields()
{
StoredFetch_c tCalc ( *m_pSorterSchema, *m_pJoinedIndex, m_uNullMask );
if ( tCalc.HasFieldToFetch() )
m_pSorter->Finalize ( tCalc, false, false );
}
ISphMatchSorter * JoinSorter_c::Clone() const
{
ISphMatchSorter * pSourceSorter = m_pOriginalSorter ? m_pOriginalSorter.get() : m_pSorter.get();
return new JoinSorter_c ( m_pIndex, m_pJoinedIndex, m_dQueries, pSourceSorter->Clone(), !m_bFinalCalcOnly );
}
bool JoinSorter_c::FinalizeJoin ( CSphString & sError, CSphString & sWarning )
{
if ( !m_bFinalCalcOnly )
{
PopulateStoredFields();
ProduceCacheSizeWarning(sWarning);
if ( m_bErrorFlag )
{
sError = m_sErrorMessage;
return false;
}
return true;
}
// keep the original underlying sorter in case other threads want to clone it
assert ( !m_pOriginalSorter );
m_pOriginalSorter = std::move(m_pSorter);
// replace underlying sorter with a new one
// and fill it with matches that we already have
m_pSorter = std::unique_ptr<ISphMatchSorter> ( m_pOriginalSorter->Clone() );
SetupSorterSchema();
m_bFinalCalcOnly = false;
if ( m_pOriginalSorter->IsGroupby() )
{
MatchCalcGrouped_c tCalc(this);
m_pOriginalSorter->Finalize ( tCalc, false, false );
}
else
{
MatchCalc_c tCalc(this);
m_pOriginalSorter->Finalize ( tCalc, false, false );
}
PopulateStoredFields();
ProduceCacheSizeWarning(sWarning);
if ( m_bErrorFlag )
{
sError = m_sErrorMessage;
return false;
}
return true;
}
static void RemoveTableNamePrefix ( CSphString & sAttr, const CSphFilterSettings & tFilter, const CSphString & sPrefix )
{
int iPrefixLen = sPrefix.Length();
sAttr = tFilter.m_sAttrName.SubString ( iPrefixLen, tFilter.m_sAttrName.Length() - iPrefixLen );
}
bool JoinSorter_c::SetupRightFilters ( CSphString & sError )
{
m_tJoinQuery.m_dFilters.Resize(0);
CSphVector<std::pair<int,bool>> dRightFilters = FetchJoinRightTableFilters ( m_tQuery.m_dFilters, *m_pSorterSchema, m_pJoinedIndex->GetName() );
bool bLeftJoin = m_tQuery.m_eJoinType==JoinType_e::LEFT;
if ( bLeftJoin || m_tQuery.m_dFilterTree.GetLength() )
{
if ( !dRightFilters.GetLength() )
return true;
if ( bLeftJoin || dRightFilters.GetLength()!=m_tQuery.m_dFilters.GetLength() )
{
CreateFilterContext_t tCtx;
tCtx.m_pFilters = &m_tQuery.m_dFilters;
tCtx.m_pFilterTree = &m_tQuery.m_dFilterTree;
tCtx.m_pMatchSchema = m_pSorterSchema;
tCtx.m_pIndexSchema = &m_pIndex->GetMatchSchema();
tCtx.m_bScan = m_tQuery.m_sQuery.IsEmpty();
tCtx.m_sJoinIdx = m_pJoinedIndex->GetName();
if ( !sphCreateFilters ( tCtx, sError, sError ) )
{
sError.SetSprintf ( "failed to create query filters: %s", sError.cstr() );
return false;
}
m_tMixedFilter.SetFilter ( tCtx.m_pFilter );
return true;
}
m_tJoinQuery.m_dFilterTree = m_tQuery.m_dFilterTree;
}
CSphString sPrefix;
sPrefix.SetSprintf ( "%s.", m_pJoinedIndex->GetName() );
ARRAY_FOREACH ( i, dRightFilters )
{
const auto & tFilter = m_tQuery.m_dFilters[dRightFilters[i].first];
if ( tFilter.m_eType==SPH_FILTER_NULL )
continue;
m_tJoinQuery.m_dFilters.Add(tFilter);
if ( dRightFilters[i].second )
RemoveTableNamePrefix ( m_tJoinQuery.m_dFilters.Last().m_sAttrName, tFilter, sPrefix );
}
return true;
}
void JoinSorter_c::AddOnFilterToFilterTree ( int iFilterId )
{
if ( !m_tJoinQuery.m_dFilterTree.GetLength() )
return;
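// graft the ON filter into the tree: append a node for the filter plus an AND
// node over the old root and that filter; the appended AND becomes the new root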
int iRootNodeId = m_tJoinQuery.m_dFilterTree.GetLength()-1;
FilterTreeItem_t & tFilter = m_tJoinQuery.m_dFilterTree.Add();
tFilter.m_iFilterItem = iFilterId;
int iFilterNodeId = m_tJoinQuery.m_dFilterTree.GetLength()-1;
FilterTreeItem_t & tAnd = m_tJoinQuery.m_dFilterTree.Add();
tAnd.m_iLeft = iRootNodeId;
tAnd.m_iRight = iFilterNodeId;
}
bool JoinSorter_c::SetupOnFilters ( CSphString & sError )
{
for ( auto & tOnFilter : m_tQuery.m_dOnFilters )
{
CSphFilterSettings & tFilter = m_tJoinQuery.m_dFilters.Add();
tFilter.m_dValues.Resize(0);
tFilter.m_dStrings.Resize(0);
CSphString sAttrIdx1 = tOnFilter.m_sAttr1;
CSphString sAttrIdx2 = tOnFilter.m_sAttr2;
CSphString sIdx1 = tOnFilter.m_sIdx1;
CSphString sIdx2 = tOnFilter.m_sIdx2;
if ( tOnFilter.m_sIdx1==m_pJoinedIndex->GetName() )
{
assert ( tOnFilter.m_sIdx2==m_pIndex->GetName() );
Swap ( sAttrIdx1, sAttrIdx2 );
Swap ( sIdx1, sIdx2 );
}
// FIXME! handle compound names for left table (e.g. 'table1.id')
const CSphColumnInfo * pAttr1 = m_pSorter->GetSchema()->GetAttr ( sAttrIdx1.cstr() );
assert(pAttr1);
// maybe it is a stored field?
if ( pAttr1->m_eAttrType==SPH_ATTR_STRINGPTR && pAttr1->m_eStage==SPH_EVAL_POSTLIMIT )
{
sError.SetSprintf ( "Unable to perform join on a stored field '%s.%s'", sIdx1.cstr(), pAttr1->m_sName.cstr() );
return false;
}
const CSphColumnInfo * pAttr2 = m_pJoinedIndex->GetMatchSchema().GetAttr ( sAttrIdx2.cstr() );
if ( pAttr2 && pAttr2->m_eAttrType==SPH_ATTR_STRINGPTR && pAttr2->m_eStage==SPH_EVAL_POSTLIMIT )
{
sError.SetSprintf ( "Unable to perform join on a stored field '%s.%s'", sIdx2.cstr(), pAttr2->m_sName.cstr() );
return false;
}
if ( !pAttr2 && !sphJsonNameSplit ( sAttrIdx2.cstr() ) )
{
sError.SetSprintf ( "joined table %s: unknown column: %s", sIdx2.cstr(), sAttrIdx2.cstr() );
return false;
}
bool bStringFilter = pAttr1->m_eAttrType==SPH_ATTR_STRING;
tFilter.m_sAttrName = sAttrIdx2;
tFilter.m_eType = bStringFilter ? SPH_FILTER_STRING : SPH_FILTER_VALUES;
int iFilterId = m_tJoinQuery.m_dFilters.GetLength()-1;
m_dFilterRemap.Add ( { iFilterId, pAttr1->m_tLocator, bStringFilter } );
if ( bStringFilter )
tFilter.m_dStrings.Resize(1);
else
tFilter.m_dValues.Resize(1);
AddOnFilterToFilterTree(iFilterId);
}
return true;
}
uint64_t JoinSorter_c::SetupJoinFilters ( const CSphMatch & tEntry )
{
uint64_t uHash = 0;
ARRAY_FOREACH ( i, m_dFilterRemap )
{
const auto & tRemap = m_dFilterRemap[i];
auto & tFilter = m_tJoinQuery.m_dFilters[tRemap.m_iFilterId];
if ( tRemap.m_bBlob )
{
ByteBlob_t tBlob = tEntry.FetchAttrData ( tRemap.m_tLocator, m_pBlobPool );
tFilter.m_dStrings[0] = CSphString ( (const char*)tBlob.first, tBlob.second );
uHash = HashWithSeed ( tBlob.first, tBlob.second, uHash );
}
else
{
SphAttr_t tValue = tEntry.GetAttr ( tRemap.m_tLocator );
tFilter.m_dValues[0] = tValue;
uHash = HashWithSeed ( &tValue, sizeof(tValue), uHash );
}
}
return uHash;
}
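// SetupJoinFilters() above folds the per-match ON-condition values into one
// combined hash by chaining: each value is hashed with the previous hash as
// the seed, so the result depends on every value and on their order. A
// standalone sketch of the same chaining using FNV-1a (HashWithSeed itself is
// a different function; this only illustrates the composition):
#if 0
#include <cstdint>
#include <cstddef>

uint64_t Fnv1aWithSeed ( const void * pData, size_t tLen, uint64_t uSeed )
{
	const auto * pBytes = static_cast<const unsigned char *>(pData);
	uint64_t uHash = uSeed ? uSeed : 0xcbf29ce484222325ULL;	// FNV offset basis
	for ( size_t i = 0; i < tLen; i++ )
		uHash = ( uHash ^ pBytes[i] ) * 0x100000001b3ULL;	// FNV prime
	return uHash;
}

uint64_t JoinKey ( int64_t iAttr1, int64_t iAttr2 )
{
	uint64_t uHash = Fnv1aWithSeed ( &iAttr1, sizeof(iAttr1), 0 );
	return Fnv1aWithSeed ( &iAttr2, sizeof(iAttr2), uHash );	// chain the seed
}
#endif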
void JoinSorter_c::AddToAttrRemap ( const CSphString & sFrom, const CSphString & sTo )
{
for ( auto & i : m_dAttrRemap )
if ( i.m_sFrom==sFrom )
{
for ( const auto & j : i.m_dTo )
if ( j==sTo )
return;
i.m_dTo.Add(sTo);
return;
}
JoinAttrNameRemap_t & tNew = m_dAttrRemap.Add();
tNew.m_sFrom = sFrom;
tNew.m_dTo.Add(sTo);
}
void JoinSorter_c::AddToJoinSelectList ( const CSphString & sExpr, const CSphString & sAlias, int iSorterAttrId, bool bConvertJsonType )
{
if ( iSorterAttrId==-1 )
return;
if ( sExpr=="*" || sAlias=="*" )
return;
CSphString sJoinExpr;
if ( !GetJoinAttrName ( sExpr, CSphString ( m_pJoinedIndex->GetName() ), &sJoinExpr ) )
return;
const CSphColumnInfo & tSorterAttr = m_pSorterSchema->GetAttr(iSorterAttrId);
if ( bConvertJsonType )
{
switch ( tSorterAttr.m_eAttrType )
{
case SPH_ATTR_STRING:
case SPH_ATTR_STRINGPTR:
sJoinExpr.SetSprintf ( "to_string(%s)", sJoinExpr.cstr() );
break;
case SPH_ATTR_FLOAT:
sJoinExpr.SetSprintf ( "double(%s)", sJoinExpr.cstr() );
break;
default:
sJoinExpr.SetSprintf ( "bigint(%s)", sJoinExpr.cstr() );
break;
}
}
CSphString sJoinAlias = sExpr==sAlias ? sJoinExpr : sAlias;
AddToAttrRemap ( sJoinAlias, tSorterAttr.m_sName );
// don't add duplicates to select list items
for ( const auto & i : m_tJoinQuery.m_dItems )
if ( i.m_sExpr==sJoinExpr && i.m_sAlias==sJoinAlias )
return;
auto & tItem = m_tJoinQuery.m_dItems.Add();
tItem.m_sExpr = sJoinExpr;
tItem.m_sAlias = sJoinAlias;
}
void JoinSorter_c::AddToJoinSelectList ( const CSphString & sExpr, const CSphString & sAlias )
{
int iSorterAttrId = m_pSorterSchema->GetAttrIndex ( sExpr.cstr() );
if ( iSorterAttrId==-1 )
iSorterAttrId = m_pSorterSchema->GetAttrIndex ( sAlias.cstr() );
AddToJoinSelectList ( sExpr, sAlias, iSorterAttrId );
}
void JoinSorter_c::AddToJoinSelectList ( const CSphString & sExpr )
{
AddToJoinSelectList ( sExpr, sExpr, m_pSorterSchema->GetAttrIndex ( sExpr.cstr() ), true );
}
void JoinSorter_c::AddToJoinSelectList ( const CSphString & sExpr, const CSphString & sAlias, const char * szRemapPrefix )
{
// maybe it's a JSON attr?
if ( !sphJsonNameSplit ( sExpr.cstr(), m_pJoinedIndex->GetName() ) )
return;
// try remapped groupby json attr
CSphString sRemapped;
sRemapped.SetSprintf ( "%s%s", szRemapPrefix, sExpr.cstr() );
AddToJoinSelectList ( sExpr, sAlias, m_pSorterSchema->GetAttrIndex ( sRemapped.cstr() ) );
}
void JoinSorter_c::AddStarItemsToJoinSelectList()
{
const CSphSchema & tJoinedSchema = m_pJoinedIndex->GetMatchSchema();
bool bHaveStar = m_tQuery.m_dItems.any_of ( []( const CSphQueryItem & tItem ) { return tItem.m_sExpr=="*" || tItem.m_sAlias=="*"; } );
if ( bHaveStar )
{
for ( int i = 0; i < tJoinedSchema.GetAttrsCount(); i++ )
{
auto & tAttr = tJoinedSchema.GetAttr(i);
if ( sphIsInternalAttr(tAttr) )
continue;
CSphString sAttrName;
sAttrName.SetSprintf ( "%s.%s", m_pJoinedIndex->GetName(), tAttr.m_sName.cstr() );
AddToJoinSelectList ( sAttrName, sAttrName );
}
}
}
void JoinSorter_c::AddQueryItemsToJoinSelectList()
{
for ( const auto & i : m_tQuery.m_dItems )
AddToJoinSelectList ( i.m_sExpr, i.m_sAlias );
}
void JoinSorter_c::AddGroupbyItemsToJoinSelectList()
{
for ( const auto & tQuery : m_dQueries )
{
if ( !tQuery.m_sGroupBy.IsEmpty() )
{
StrVec_t dGroupby = ParseGroupBy ( tQuery.m_sGroupBy );
for ( const auto & i : dGroupby )
{
AddToJoinSelectList ( i, i );
AddToJoinSelectList ( i, i, GetInternalJsonPrefix() ); // try to add as json attr
}
}
if ( !tQuery.m_sGroupDistinct.IsEmpty() )
AddToJoinSelectList ( tQuery.m_sGroupDistinct, tQuery.m_sGroupDistinct );
}
}
void JoinSorter_c::AddRemappedStringItemsToJoinSelectList()
{
auto * pSorterSchema = m_pSorter->GetSchema();
assert(pSorterSchema);
for ( int i = 0; i < pSorterSchema->GetAttrsCount(); i++ )
{
auto & tAttr = pSorterSchema->GetAttr(i);
if ( sphIsInternalAttr(tAttr) )
continue;
CSphString sName = tAttr.m_sName;
if ( IsSortStringInternal ( sName.cstr() ) )
{
int iPrefixLen = strlen ( GetInternalAttrPrefix() );
CSphString sJoinedAttrName = sName.cstr()+iPrefixLen;
AddToJoinSelectList ( sJoinedAttrName, sJoinedAttrName, GetInternalAttrPrefix() );
}
if ( IsSortJsonInternal ( sName.cstr() ) )
{
int iPrefixLen = strlen ( GetInternalJsonPrefix() );
CSphString sJoinedAttrName = sName.cstr()+iPrefixLen;
AddToJoinSelectList ( sJoinedAttrName, sJoinedAttrName, GetInternalJsonPrefix() );
}
}
}
void JoinSorter_c::AddExpressionItemsToJoinSelectList()
{
// find JSON/columnar attrs present in filters and add them to select list (only when all filters are moved to the left query)
if ( !NeedToMoveMixedJoinFilters ( m_tQuery, *m_pSorterSchema ) )
return;
const CSphSchema & tJoinedSchema = m_pJoinedIndex->GetMatchSchema();
for ( const auto & i : m_tQuery.m_dFilters )
{
if ( sphJsonNameSplit ( i.m_sAttrName.cstr(), m_pJoinedIndex->GetName() ) )
{
AddToJoinSelectList ( i.m_sAttrName );
continue;
}
CSphString sJoinedAttr;
if ( GetJoinAttrName ( i.m_sAttrName, CSphString ( m_pJoinedIndex->GetName() ), &sJoinedAttr ) )
{
const CSphColumnInfo * pAttr = tJoinedSchema.GetAttr ( sJoinedAttr.cstr() );
if ( pAttr && pAttr->IsColumnar() )
AddToJoinSelectList ( i.m_sAttrName, i.m_sAttrName );
}
}
}
void JoinSorter_c::AddDocidToJoinSelectList()
{
// fetch docid; we need it for docstore queries
CSphString sId;
sId.SetSprintf ( "%s.%s", m_pJoinedIndex->GetName(), sphGetDocidName() );
AddToJoinSelectList ( sId, sId );
}
void JoinSorter_c::SetupJoinSelectList()
{
m_tJoinQuery.m_dItems.Resize(0);
m_dAttrRemap.Reset();
AddStarItemsToJoinSelectList();
AddQueryItemsToJoinSelectList();
AddGroupbyItemsToJoinSelectList();
AddRemappedStringItemsToJoinSelectList();
AddExpressionItemsToJoinSelectList();
AddDocidToJoinSelectList();
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class JoinMultiSorter_c : public JoinSorter_c
{
public:
JoinMultiSorter_c ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const VecTraits_T<CSphQuery> & dQueries, VecTraits_T<ISphMatchSorter *> dSorters );
bool Push ( const CSphMatch & tEntry ) override;
bool PushGrouped ( const CSphMatch & tEntry, bool bNewSet ) override;
// cloning multi-sorter is hard with the current architecture
// as it holds pointers to sorters that also exist in dSorters array in matching/fullscan
// for cloning to work we would need to clone the sorters that we hold and also somehow sync them with dSorters
bool CanBeCloned() const override { return false; }
private:
CSphVector<ISphMatchSorter *> m_dSorters; // we don't own 1..N sorters (JoinSorter_c owns sorter #0)
};
JoinMultiSorter_c::JoinMultiSorter_c ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const VecTraits_T<CSphQuery> & dQueries, VecTraits_T<ISphMatchSorter *> dSorters )
: JoinSorter_c ( pIndex, pJoinedIndex, dQueries, dSorters[0], true )
{
m_dSorters.Resize ( dSorters.GetLength() );
memcpy ( m_dSorters.Begin(), dSorters.Begin(), dSorters.GetLengthBytes() );
}
bool JoinMultiSorter_c::Push ( const CSphMatch & tEntry )
{
return Push_T ( tEntry, [this]( const CSphMatch & tMatch )
{
bool bNew = false;
for ( auto & i : m_dSorters )
bNew |= i->Push(tMatch);
return bNew;
}
);
}
bool JoinMultiSorter_c::PushGrouped ( const CSphMatch & tEntry, bool bNewSet )
{
return Push_T ( tEntry, [this,bNewSet]( const CSphMatch & tMatch )
{
bool bNew = false;
for ( auto & i : m_dSorters )
bNew |= i->PushGrouped ( tMatch, bNewSet );
return bNew;
} );
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class SorterWrapperNoPush_c : public ISphMatchSorter
{
public:
SorterWrapperNoPush_c ( ISphMatchSorter * pSorter ) : m_pSorter ( pSorter ) {}
bool IsGroupby() const override { return m_pSorter->IsGroupby(); }
void SetState ( const CSphMatchComparatorState & tState ) override { m_pSorter->SetState(tState); }
const CSphMatchComparatorState & GetState() const override { return m_pSorter->GetState(); }
void SetGroupState ( const CSphMatchComparatorState & tState ) override { m_pSorter->SetGroupState(tState); }
void SetBlobPool ( const BYTE * pBlobPool ) override { m_pSorter->SetBlobPool(pBlobPool); }
void SetColumnar ( columnar::Columnar_i * pColumnar ) override { m_pSorter->SetColumnar(pColumnar); }
void SetSchema ( ISphSchema * pSchema, bool bRemapCmp ) override { m_pSorter->SetSchema(pSchema, bRemapCmp); }
const ISphSchema * GetSchema() const override { return m_pSorter->GetSchema(); }
bool Push ( const CSphMatch & tEntry ) override { return false; }
void Push ( const VecTraits_T<const CSphMatch> & dMatches ) override {}
bool PushGrouped ( const CSphMatch & tEntry, bool bNewSet ) override { return false; }
int GetLength() override { return m_pSorter->GetLength(); }
int64_t GetTotalCount() const override { return m_pSorter->GetTotalCount(); }
void Finalize ( MatchProcessor_i & tProcessor, bool bCallProcessInResultSetOrder, bool bFinalizeMatches ) override { m_pSorter->Finalize ( tProcessor, bCallProcessInResultSetOrder, bFinalizeMatches ); }
int Flatten ( CSphMatch * pTo ) override { return m_pSorter->Flatten(pTo); }
const CSphMatch * GetWorst() const override { return m_pSorter->GetWorst(); }
bool CanBeCloned() const override { return m_pSorter->CanBeCloned(); }
ISphMatchSorter * Clone() const override { return new SorterWrapperNoPush_c ( m_pSorter->Clone() ); }
void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) override { m_pSorter->MoveTo ( ((SorterWrapperNoPush_c *)pRhs)->m_pSorter.get(), bCopyMeta ); }
void CloneTo ( ISphMatchSorter * pTrg ) const override { m_pSorter->CloneTo(pTrg); }
void SetFilteredAttrs ( const sph::StringSet & hAttrs, bool bAddDocid ) override { m_pSorter->SetFilteredAttrs(hAttrs, bAddDocid); }
void TransformPooled2StandalonePtrs ( GetBlobPoolFromMatch_fn fnBlobPoolFromMatch, GetColumnarFromMatch_fn fnGetColumnarFromMatch, bool bFinalizeSorters ) override { m_pSorter->TransformPooled2StandalonePtrs(fnBlobPoolFromMatch, fnGetColumnarFromMatch, bFinalizeSorters); }
void SetRandom ( bool bRandom ) override { m_pSorter->SetRandom(bRandom); }
bool IsRandom() const override { return m_pSorter->IsRandom(); }
int GetMatchCapacity() const override { return m_pSorter->GetMatchCapacity(); }
RowTagged_t GetJustPushed() const override { return m_pSorter->GetJustPushed(); }
VecTraits_T<RowTagged_t> GetJustPopped() const override { return m_pSorter->GetJustPopped(); }
bool IsCutoffDisabled() const override { return m_pSorter->IsCutoffDisabled(); }
void SetMerge ( bool bMerge ) override { m_pSorter->SetMerge(bMerge); }
bool IsPrecalc() const override { return m_pSorter->IsPrecalc(); }
private:
std::unique_ptr<ISphMatchSorter> m_pSorter;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool CheckJoinOnFilters ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const CSphQuery & tQuery, CSphString & sError )
{
if ( !tQuery.m_dOnFilters.GetLength() )
{
sError.SetSprintf ( "JOIN ON condition is empty" );
return false;
}
for ( const auto & i : tQuery.m_dOnFilters )
{
if ( i.m_sIdx1!=pIndex->GetName() && i.m_sIdx1!=pJoinedIndex->GetName() )
{
sError.SetSprintf ( "JOIN ON table '%s' not found", i.m_sIdx1.cstr() );
return false;
}
if ( i.m_sIdx2!=pIndex->GetName() && i.m_sIdx2!=pJoinedIndex->GetName() )
{
sError.SetSprintf ( "JOIN ON table '%s' not found", i.m_sIdx2.cstr() );
return false;
}
}
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class JoinNullFilter_c : public ISphFilter
{
public:
JoinNullFilter_c ( bool bIsNull, const CSphAttrLocator & tNullMapLocator );
bool Eval ( const CSphMatch & tMatch ) const override { return (!!tMatch.GetAttr(m_tNullMapLocator)) ^ (!m_bIsNull); }
private:
bool m_bIsNull = false;
CSphAttrLocator m_tNullMapLocator;
};
JoinNullFilter_c::JoinNullFilter_c ( bool bIsNull, const CSphAttrLocator & tNullMapLocator )
: m_bIsNull ( bIsNull )
, m_tNullMapLocator ( tNullMapLocator )
{}
std::unique_ptr<ISphFilter> CreateJoinNullFilter ( const CSphFilterSettings & tSettings, const CSphAttrLocator & tNullMapLocator )
{
return std::make_unique<JoinNullFilter_c> ( tSettings.m_bIsNull, tNullMapLocator );
}
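// Eval() above packs both IS NULL and IS NOT NULL into one XOR (assuming a
// set null-map bit means "the joined row is NULL"): (!!uNullMap) ^ (!bIsNull)
// evaluates to
//   IS NULL     (bIsNull=true):  null row -> true,  joined row -> false
//   IS NOT NULL (bIsNull=false): null row -> false, joined row -> true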
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ISphMatchSorter * CreateJoinSorter ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const SphQueueSettings_t & tSettings, const CSphQuery & tQuery, ISphMatchSorter * pSorter, bool bJoinedGroupSort, CSphString & sError )
{
if ( !tSettings.m_pJoinArgs )
return pSorter;
if ( !CheckJoinOnFilters ( pIndex, pJoinedIndex, tQuery, sError ) )
{
SafeDelete(pSorter);
return nullptr;
}
std::unique_ptr<JoinSorter_c> pJoinSorter = std::make_unique<JoinSorter_c> ( pIndex, pJoinedIndex, tQuery, pSorter, bJoinedGroupSort );
if ( pJoinSorter->GetErrorFlag() )
{
sError = pJoinSorter->GetErrorMessage();
pJoinSorter.reset();
}
return pJoinSorter.release();
}
bool CreateJoinMultiSorter ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, const SphQueueSettings_t & tSettings, const VecTraits_T<CSphQuery> & dQueries, VecTraits_T<ISphMatchSorter *> & dSorters, CSphString & sError )
{
if ( !tSettings.m_pJoinArgs )
return true;
if ( !CheckJoinOnFilters ( pIndex, pJoinedIndex, dQueries.First(), sError ) )
return false;
	// the idea is that the 1st sorter does the join AND pushes the joined matches to all other sorters
	// to avoid double pushes, sorters 1..N are wrapped in a class that turns direct pushes into no-ops
std::unique_ptr<JoinMultiSorter_c> pJoinSorter = std::make_unique<JoinMultiSorter_c> ( pIndex, pJoinedIndex, dQueries, dSorters );
if ( pJoinSorter->GetErrorFlag() )
{
sError = pJoinSorter->GetErrorMessage();
pJoinSorter.reset();
}
dSorters[0] = pJoinSorter.release();
for ( int i = 1; i < dSorters.GetLength(); i++ )
{
if ( !dSorters[i] )
continue;
dSorters[i] = new SorterWrapperNoPush_c ( dSorters[i] );
}
return true;
}
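// The scheme above in one picture: sorter #0 becomes the join driver and fans
// every joined match out to all sorters, while sorters 1..N are wrapped so
// that direct pushes from the match loop become no-ops; each match therefore
// enters every sorter exactly once. A minimal sketch of the wrapper idea
// (Sorter_i and Match_t are illustrative, not the real ISphMatchSorter):
#if 0
#include <vector>

struct Match_t { int m_iValue; };

struct Sorter_i
{
	virtual ~Sorter_i() = default;
	virtual bool Push ( const Match_t & tMatch ) = 0;
};

struct NoPushWrapper_t final : Sorter_i
{
	Sorter_i * m_pInner;
	explicit NoPushWrapper_t ( Sorter_i * pInner ) : m_pInner ( pInner ) {}
	bool Push ( const Match_t & ) final { return false; }	// the driver pushes instead
};

struct JoinDriver_t final : Sorter_i
{
	std::vector<Sorter_i *> m_dTargets;	// the underlying sorters, not the wrappers
	bool Push ( const Match_t & tMatch ) final
	{
		bool bNew = false;
		for ( auto * pSorter : m_dTargets )
			bNew |= pSorter->Push(tMatch);	// the only place matches get pushed
		return bNew;
	}
};
#endif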
| 51,617 | C++ | .cpp | 1,294 | 37.31221 | 275 | 0.709522 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,824 | columnarlib.cpp | manticoresoftware_manticoresearch/src/columnarlib.cpp |
//
// Copyright (c) 2020-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "columnarlib.h"
#include "sphinxutils.h"
#include "sphinxexpr.h"
#include "libutils.h"
#include "fileutils.h"
#include "schema/columninfo.h"
#include "schema/schema.h"
using CreateStorageReader_fn = columnar::Columnar_i * (*) ( const std::string & sFilename, uint32_t uTotalDocs, std::string & sError );
using CreateBuilder_fn = columnar::Builder_i * (*) ( const common::Schema_t & tSchema, const std::string & sFile, size_t tBufferSize, std::string & sError );
using CheckStorage_fn = void (*) ( const std::string & sFilename, uint32_t uNumRows, std::function<void (const char*)> & fnError, std::function<void (const char*)> & fnProgress );
using VersionStr_fn = const char * (*)();
using GetVersion_fn = int (*)();
static void * g_pColumnarLib = nullptr;
static CreateStorageReader_fn g_fnCreateColumnarStorage = nullptr;
static CreateBuilder_fn g_fnCreateColumnarBuilder = nullptr;
static CheckStorage_fn g_fnCheckColumnarStorage = nullptr;
static VersionStr_fn g_fnVersionStr = nullptr;
/////////////////////////////////////////////////////////////////////
common::AttrType_e ToColumnarType ( ESphAttr eAttrType, int iBitCount )
{
switch ( eAttrType )
{
case SPH_ATTR_NONE: return common::AttrType_e::NONE;
case SPH_ATTR_TOKENCOUNT:
case SPH_ATTR_INTEGER: return iBitCount==1 ? common::AttrType_e::BOOLEAN : common::AttrType_e::UINT32;
case SPH_ATTR_TIMESTAMP: return common::AttrType_e::TIMESTAMP;
case SPH_ATTR_BOOL: return common::AttrType_e::BOOLEAN;
case SPH_ATTR_FLOAT: return common::AttrType_e::FLOAT;
case SPH_ATTR_BIGINT: return iBitCount==1 ? common::AttrType_e::BOOLEAN : common::AttrType_e::INT64;
case SPH_ATTR_STRING: return common::AttrType_e::STRING;
case SPH_ATTR_UINT32SET: return common::AttrType_e::UINT32SET;
case SPH_ATTR_INT64SET: return common::AttrType_e::INT64SET;
case SPH_ATTR_FLOAT_VECTOR: return common::AttrType_e::FLOATVEC;
default:
assert ( 0 && "Unknown columnar type");
return common::AttrType_e::NONE;
}
}
std::unique_ptr<columnar::Columnar_i> CreateColumnarStorageReader ( const CSphString & sFile, DWORD uNumDocs, CSphString & sError )
{
if ( !IsColumnarLibLoaded() )
{
sError = "columnar library not loaded";
return nullptr;
}
std::string sErrorSTL;
assert ( g_fnCreateColumnarStorage );
std::unique_ptr<columnar::Columnar_i> pColumnar { g_fnCreateColumnarStorage ( sFile.cstr(), uNumDocs, sErrorSTL ) };
if ( !pColumnar )
sError = sErrorSTL.c_str();
return pColumnar;
}
std::unique_ptr<columnar::Builder_i> CreateColumnarBuilder ( const ISphSchema & tSchema, const CSphString & sFilename, size_t tBufferSize, CSphString & sError )
{
if ( !IsColumnarLibLoaded() )
{
sError = "columnar library not loaded";
return nullptr;
}
common::Schema_t tColumnarSchema;
std::string sErrorSTL;
// convert our data types to columnar storage data types
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( !tAttr.IsColumnar() )
continue;
common::StringHash_fn fnStringCalcHash = nullptr;
common::AttrType_e eAttrType = ToColumnarType ( tAttr.m_eAttrType, tAttr.m_tLocator.m_iBitCount );
// fixme! make default collation configurable
if ( eAttrType==common::AttrType_e::STRING && tAttr.HasStringHashes() )
fnStringCalcHash = LibcCIHash_fn::Hash;
tColumnarSchema.push_back ( { tAttr.m_sName.cstr(), eAttrType, fnStringCalcHash } );
}
if ( tColumnarSchema.empty() )
return nullptr;
assert ( g_fnCreateColumnarBuilder );
std::unique_ptr<columnar::Builder_i> pBuilder { g_fnCreateColumnarBuilder ( tColumnarSchema, sFilename.cstr(), tBufferSize, sErrorSTL ) };
if ( !pBuilder )
sError = sErrorSTL.c_str();
return pBuilder;
}
void CheckColumnarStorage ( const CSphString & sFile, DWORD uNumRows, std::function<void (const char*)> fnError, std::function<void (const char*)> fnProgress )
{
if ( !IsColumnarLibLoaded() )
{
fnError ( "columnar library not loaded" );
return;
}
assert ( g_fnCheckColumnarStorage );
g_fnCheckColumnarStorage ( sFile.cstr(), (uint32_t)uNumRows, fnError, fnProgress );
}
#if HAVE_DLOPEN
bool InitColumnar ( CSphString & sError )
{
assert ( !g_pColumnarLib );
CSphString sLibfile = TryDifferentPaths ( LIB_MANTICORE_COLUMNAR, GetColumnarFullpath(), columnar::LIB_VERSION );
if ( sLibfile.IsEmpty() )
return true;
if ( !IsSSE42Supported() )
{
sError.SetSprintf ( "MCL requires a CPU that supports SSE 4.2" );
return false;
}
ScopedHandle_c tHandle ( dlopen ( sLibfile.cstr(), RTLD_LAZY | RTLD_LOCAL ) );
if ( !tHandle.Get() )
{
const char * szDlError = dlerror();
sError.SetSprintf ( "dlopen() failed: %s", szDlError ? szDlError : "(null)" );
return true; // if dlopen fails, don't report an error
}
sphLogDebug ( "dlopen(%s)=%p", sLibfile.cstr(), tHandle.Get() );
GetVersion_fn fnGetVersion;
if ( !LoadFunc ( fnGetVersion, tHandle.Get(), "GetColumnarLibVersion", sLibfile, sError ) )
return false;
int iLibVersion = fnGetVersion();
if ( iLibVersion!=columnar::LIB_VERSION )
{
sError.SetSprintf ( "daemon requires columnar library v%d (trying to load v%d)", columnar::LIB_VERSION, iLibVersion );
return false;
}
if ( !LoadFunc ( g_fnCreateColumnarStorage, tHandle.Get(), "CreateColumnarStorageReader", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnCreateColumnarBuilder, tHandle.Get(), "CreateColumnarBuilder", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnCheckColumnarStorage, tHandle.Get(), "CheckColumnarStorage", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnVersionStr, tHandle.Get(), "GetColumnarLibVersionStr", sLibfile, sError ) ) return false;
g_pColumnarLib = tHandle.Leak();
return true;
}
void ShutdownColumnar()
{
if ( g_pColumnarLib )
dlclose(g_pColumnarLib);
}
#else
bool InitColumnar ( CSphString & sError )
{
return false;
}
void ShutdownColumnar()
{
}
#endif
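// InitColumnar() above follows the usual optional-library dlopen() pattern:
// open the library, resolve a version probe first, refuse on ABI mismatch,
// then resolve the remaining entry points and keep the handle for the process
// lifetime. A generic standalone sketch of that pattern (POSIX dlopen/dlsym;
// "libdemo.so" and "GetVersion" are made-up names):
#if 0
#include <dlfcn.h>

bool LoadDemoLib()
{
	void * pHandle = dlopen ( "libdemo.so", RTLD_LAZY | RTLD_LOCAL );
	if ( !pHandle )
		return false;	// the library is optional; absence is not fatal

	using GetVersion_fn = int (*)();
	auto fnGetVersion = (GetVersion_fn) dlsym ( pHandle, "GetVersion" );
	if ( !fnGetVersion || fnGetVersion()!=1 )
	{
		dlclose(pHandle);
		return false;	// missing symbol or ABI version mismatch
	}

	// resolve the remaining entry points here; on success keep pHandle
	// until shutdown and dlclose() it there
	return true;
}
#endif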
const char * GetColumnarVersionStr()
{
if ( !IsColumnarLibLoaded() )
return nullptr;
assert ( g_fnVersionStr );
return g_fnVersionStr();
}
bool IsColumnarLibLoaded()
{
return !!g_pColumnarLib;
}
std::unique_ptr<columnar::Iterator_i> CreateColumnarIterator ( const columnar::Columnar_i * pColumnar, const std::string & sName, std::string & sError, const columnar::IteratorHints_t & tHints, columnar::IteratorCapabilities_t * pCapabilities )
{
return std::unique_ptr<columnar::Iterator_i> { pColumnar->CreateIterator ( sName, tHints, pCapabilities, sError ) };
}
| 6,872 | C++ | .cpp | 166 | 39.319277 | 244 | 0.733904 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,825 | histogram.cpp | manticoresoftware_manticoresearch/src/histogram.cpp |
//
// Copyright (c) 2018-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "histogram.h"
#include "attribute.h"
#include "sphinxint.h"
#include "conversion.h"
#include <math.h>
#include <algorithm>
template <typename T>
struct HSBucket_T
{
T m_tCentroid;
int m_iCount;
HSBucket_T Merge ( const HSBucket_T & tB )
{
T tDeltaCentroid = tB.m_tCentroid - m_tCentroid;
HSBucket_T<T> tVal;
tVal.m_tCentroid = m_tCentroid + tDeltaCentroid * tB.m_iCount / ( m_iCount + tB.m_iCount );
tVal.m_iCount = m_iCount + tB.m_iCount;
return tVal;
}
bool IsCenterEq ( const HSBucket_T & tB ) const
{
return ( m_tCentroid==tB.m_tCentroid );
}
};
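// Merge() above is the count-weighted mean of the two centroids:
//   c' = c1 + (c2-c1)*n2/(n1+n2) = (c1*n1 + c2*n2)/(n1+n2),  n' = n1+n2
// written in the incremental form, which keeps the intermediate products
// smaller for integer centroid types.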
template<> bool HSBucket_T<float>::IsCenterEq ( const HSBucket_T & tB ) const
{
return ( fabs ( m_tCentroid - tB.m_tCentroid )<=FLT_EPSILON );
}
// operators for FindSpan
template <typename T>
bool operator < ( const HSBucket_T<T> & a, T b )
{
return ( a.m_tCentroid<b );
}
template <typename T>
bool operator < ( T a, const HSBucket_T<T> & b )
{
return ( a<b.m_tCentroid);
}
template <typename T>
bool operator == ( const HSBucket_T<T> & a, T b )
{
return ( a.m_tCentroid==b );
}
template<> bool operator == ( const HSBucket_T<float> & a, float b )
{
return ( fabs ( a.m_tCentroid - b )<=FLT_EPSILON );
}
template <typename T>
struct HSQueueItem_T
{
T m_tDelta { 0 };
int m_iId { 0 };
HSQueueItem_T() = default;
HSQueueItem_T ( T tDelta, int iId )
: m_tDelta ( tDelta )
, m_iId ( iId )
{}
static inline bool IsLess ( const HSQueueItem_T & tA, const HSQueueItem_T & tB )
{
if ( tA.m_tDelta==tB.m_tDelta )
return ( tA.m_iId<tB.m_iId );
return ( tA.m_tDelta<tB.m_tDelta );
}
};
template<> bool HSQueueItem_T<float>::IsLess ( const HSQueueItem_T & tA, const HSQueueItem_T & tB )
{
if ( fabs ( tA.m_tDelta-tB.m_tDelta )<=FLT_EPSILON )
return ( tA.m_iId<tB.m_iId );
return ( tA.m_tDelta<tB.m_tDelta );
}
template <typename T>
struct HSListItem_T
{
// list items
HSListItem_T * m_pPrev { nullptr };
HSListItem_T * m_pNext { nullptr };
bool m_bAlive { true };
HSBucket_T<T> * m_pItem { nullptr };
int m_iId { 0 };
static T Delta ( const HSListItem_T & tCur )
{
return ( tCur.m_pNext->m_pItem->m_tCentroid - tCur.m_pItem->m_tCentroid );
}
static void Delete ( HSListItem_T * pNode )
{
if ( !pNode )
return;
if ( pNode->m_pNext )
pNode->m_pNext->m_pPrev = pNode->m_pPrev;
if ( pNode->m_pPrev )
pNode->m_pPrev->m_pNext = pNode->m_pNext;
pNode->m_pNext = nullptr;
pNode->m_pPrev = nullptr;
pNode->m_bAlive = false;
}
};
struct HSBucketTrait_t
{
int m_iBucket = 0;
int m_iCount = 0;
HSBucketTrait_t() = default;
HSBucketTrait_t ( int iBucket, int iCount )
: m_iBucket ( iBucket )
, m_iCount ( iCount )
{}
};
//////////////////////////////////////////////////////////////////////////
template <typename T>
class HistogramStreamed_T : public Histogram_i
{
public:
HistogramStreamed_T ( CSphString sAttr, int iBins );
void Insert ( SphAttr_t tAttrVal ) override;
void Finalize() override;
bool Save ( CSphWriter & tWriter ) const override;
bool Load ( CSphReader & tReader, CSphString & sError ) override;
DWORD GetNumValues() const override { return m_uValues; }
HistogramType_e GetType() const override { return TYPE; }
const CSphString & GetAttrName() const override { return m_sAttr; }
void Delete ( SphAttr_t tAttr ) override;
void UpdateCounter ( SphAttr_t tAttr ) override;
bool IsOutdated() const override;
void Dump ( StringBuilder_c & tOut ) const override;
bool EstimateRsetSize ( const CSphFilterSettings & tFilter, HistogramRset_t & tEstimate ) const override;
int GetSize() const override { return m_iSize; }
private:
static const HistogramType_e TYPE;
static const T MIN_BY_TYPE;
static const T MAX_BY_TYPE;
static const int m_iKbufferFactor = 2;
static const DWORD VERSION = 3;
CSphString m_sAttr;
T m_tMinValue;
T m_tMaxValue;
int m_iMaxBins = 0;
int m_iSize = 0;
DWORD m_uValues = 0;
DWORD m_uOutdated = 0;
CSphFixedVector<HSBucket_T<T>> m_dBuckets { 0 };
bool m_bUpdateMode = false;
DWORD GetLength() const { return Min ( m_iSize, m_iMaxBins ); }
void DumpValue ( const HSBucket_T < T > & tVal, StringBuilder_c & tBuf ) const;
void Push ( T tValue, int iCount );
void Aggregate ( int iBins );
int GetBucket ( T tValue, bool bCounterLess ) const;
int LerpCounter ( int iBucket, T tVal ) const;
HSBucketTrait_t GetBucket ( T tValue ) const;
HistogramRset_t EstimateValues ( bool bExclude, const VecTraits_T<SphAttr_t>& dValues ) const;
HistogramRset_t EstimateRangeFilter ( bool bExclude, bool bHasEqualMin, bool bHasEqualMax, bool bOpenLeft, bool bOpenRight, T tMinValue, T tMaxValue ) const;
T Saturate ( T tVal ) const;
HistogramRset_t EstimateInterval ( T tMin, T tMax, bool bHasEqualMin, bool bHasEqualMax, bool bOpenLeft, bool bOpenRight ) const;
bool IsOutdated ( SphAttr_t tAttr ) const;
void UpdateMinMax();
};
template<typename T>
HistogramStreamed_T<T>::HistogramStreamed_T ( CSphString sAttr, int iBins )
: m_sAttr ( std::move(sAttr) )
, m_iMaxBins ( iBins )
, m_dBuckets ( iBins * m_iKbufferFactor )
{
m_dBuckets.ZeroVec();
UpdateMinMax();
}
template<typename T>
void HistogramStreamed_T<T>::Insert ( SphAttr_t tAttrVal )
{
assert ( !m_bUpdateMode );
Push ( ConvertType<T>(tAttrVal), 1 );
}
template<typename T>
void HistogramStreamed_T<T>::Finalize()
{
Aggregate(m_iMaxBins);
	// fold adjacent buckets with equal centroids, like uniq, but merging their counters
if ( m_iSize )
{
int iSrc = 1;
int iDst = 1;
while ( iSrc<m_iSize )
{
if ( m_dBuckets[iDst-1].IsCenterEq ( m_dBuckets[iSrc] ) )
{
m_dBuckets[iDst-1] = m_dBuckets[iDst-1].Merge ( m_dBuckets[iSrc] );
iSrc++;
} else
{
m_dBuckets[iDst] = m_dBuckets[iSrc];
iDst++;
iSrc++;
}
}
m_iSize = iDst;
}
UpdateMinMax();
}
template<typename T>
bool HistogramStreamed_T<T>::Save ( CSphWriter & tWriter ) const
{
tWriter.PutDword ( VERSION );
tWriter.PutBytes ( &m_tMinValue, sizeof(T) );
tWriter.PutBytes ( &m_tMaxValue, sizeof(T) );
tWriter.PutDword ( m_iMaxBins );
tWriter.PutDword ( m_iSize );
tWriter.PutDword ( m_uValues );
tWriter.PutDword ( m_uOutdated );
tWriter.PutBytes ( m_dBuckets.Begin(), m_iSize*sizeof(m_dBuckets[0]) );
return true;
}
template<typename T>
bool HistogramStreamed_T<T>::Load ( CSphReader & tReader, CSphString & sError )
{
DWORD uVersion = tReader.GetDword();
if ( uVersion > VERSION )
{
sError.SetSprintf ( "table histogram version (%u) greater than code histogram version (%u)", uVersion, VERSION );
return false;
}
if ( uVersion<=1 )
{
sError.SetSprintf ( "non-streamed histograms are no longer supported" );
return false;
}
tReader.GetBytes ( &m_tMinValue, sizeof(T) );
tReader.GetBytes ( &m_tMaxValue, sizeof(T) );
m_iMaxBins = tReader.GetDword();
m_iSize = tReader.GetDword();
m_uValues = tReader.GetDword();
m_dBuckets.Reset ( m_iSize );
if ( uVersion>=3 )
m_uOutdated = tReader.GetDword();
tReader.GetBytes ( m_dBuckets.Begin(), m_iSize*sizeof(m_dBuckets[0]) );
if ( tReader.GetErrorFlag() )
{
sError = tReader.GetErrorMessage();
return false;
}
	// cannot insert values after load - only update existing ones
m_bUpdateMode = true;
return true;
}
template<typename T>
bool HistogramStreamed_T<T>::IsOutdated ( SphAttr_t tAttr ) const
{
T tVal = ConvertType<T> ( tAttr );
return ( tVal<m_tMinValue || m_tMaxValue<tVal );
}
template<typename T>
void HistogramStreamed_T<T>::Delete ( SphAttr_t tAttr )
{
// select bucket with larger counter to decrease
int iBucket = GetBucket ( tAttr, false );
assert ( iBucket>=0 && iBucket<m_iSize && m_uValues>0 );
if ( m_dBuckets[iBucket].m_iCount )
m_dBuckets[iBucket].m_iCount--;
m_uValues--;
if ( IsOutdated ( tAttr ) )
m_uOutdated--;
}
template<typename T>
void HistogramStreamed_T<T>::UpdateCounter ( SphAttr_t tAttr )
{
// select bucket with smaller counter to update
int iBucket = GetBucket ( tAttr, true );
assert ( iBucket>=0 && iBucket<m_iSize );
m_dBuckets[iBucket].m_iCount++;
m_uValues++;
if ( IsOutdated ( tAttr ) )
m_uOutdated++;
}
template<typename T>
bool HistogramStreamed_T<T>::IsOutdated() const
{
if ( !m_uValues )
return true;
// outdated values should be less than 30% for histogram to estimate properly
const float MAX_OUT_OF_RANGE = 0.3f;
return ( ( (float)m_uOutdated / (float)m_uValues )>=MAX_OUT_OF_RANGE );
}
template<typename T>
void HistogramStreamed_T<T>::Dump ( StringBuilder_c & tOut ) const
{
StringBuilder_c tBuf ( ";" );
for ( int i=0; i<m_iSize; i++ )
DumpValue ( m_dBuckets[i], tBuf );
tOut.Appendf ( "%s hist-streamed\nvalues:%d\n%s", m_sAttr.cstr(), m_iSize, tBuf.cstr() );
}
template<typename T>
void HistogramStreamed_T<T>::Push ( T tValue, int iCount )
{
m_dBuckets[m_iSize].m_tCentroid = tValue;
m_dBuckets[m_iSize].m_iCount = iCount;
m_iSize++;
m_uValues++;
if ( m_iSize>=m_dBuckets.GetLength() )
Aggregate(m_iMaxBins);
}
template<typename T>
void HistogramStreamed_T<T>::UpdateMinMax()
{
if ( m_iSize )
{
m_tMinValue = m_dBuckets[0].m_tCentroid;
m_tMaxValue = m_dBuckets[m_iSize-1].m_tCentroid;
} else
{
m_tMinValue = MAX_BY_TYPE;
m_tMaxValue = MIN_BY_TYPE;
}
}
template<typename T>
void HistogramStreamed_T<T>::Aggregate ( int iBins )
{
// order by centers
if ( m_iSize )
m_dBuckets.Sort ( bind ( &HSBucket_T<T>::m_tCentroid ), 0, m_iSize-1 );
int iSize = m_iSize;
if ( m_iSize<=iBins )
return;
using ListItem_t = HSListItem_T < T >;
CSphFixedVector<ListItem_t> dList ( m_iSize );
for (int i = 0; i<m_iSize; ++i)
{
dList[i].m_pPrev = dList.Begin() + i - 1;
dList[i].m_pNext = dList.Begin() + i + 1;
dList[i].m_iId = i;
dList[i].m_pItem = m_dBuckets.Begin() + i;
}
dList[0].m_pPrev = nullptr;
dList[m_iSize-1].m_pNext = nullptr;
using QItem_t = HSQueueItem_T<T>;
CSphQueue<QItem_t, QItem_t> tQueue ( 2 * m_iSize - iBins );
	// do not add the last item as it has no next neighbor and thus no valid delta
for (int i = 0; i<m_iSize-1; ++i)
{
const ListItem_t & tItem = dList[i];
Verify ( tQueue.Push ( QItem_t ( ListItem_t::Delta ( tItem ), tItem.m_iId ) ) );
}
while ( iSize>iBins && tQueue.GetLength() )
{
QItem_t tMin = tQueue.Root();
tQueue.Pop();
ListItem_t & tItem = dList[tMin.m_iId];
		// reject stale queue entries: the item and its next neighbor must both be alive, and the queued delta must not be smaller than the actual current delta
if ( !tItem.m_bAlive || !tItem.m_pNext || !tItem.m_pNext->m_bAlive || tMin.m_tDelta<ListItem_t::Delta ( tItem ) )
continue;
*tItem.m_pItem = tItem.m_pItem->Merge ( *tItem.m_pNext->m_pItem );
ListItem_t::Delete ( tItem.m_pNext );
		// re-queue current -> next since the delta from current has changed; a dupe is possible and will be rejected above
if ( tItem.m_pNext && tItem.m_pNext->m_bAlive )
Verify ( tQueue.Push ( QItem_t ( ListItem_t::Delta ( tItem ), tMin.m_iId ) ) );
		// re-queue prev -> current since the delta to current has changed; a dupe is possible and will be rejected above
if ( tItem.m_pPrev && tItem.m_pPrev->m_bAlive )
Verify ( tQueue.Push ( QItem_t ( ListItem_t::Delta ( *tItem.m_pPrev ), tItem.m_pPrev->m_iId ) ) );
iSize--;
}
// copy buckets
int iSrc = 0;
int iDst = 0;
while ( iSrc<m_iSize )
{
if ( dList[iSrc].m_bAlive )
{
m_dBuckets[iDst] = m_dBuckets[iSrc];
iDst++;
}
iSrc++;
}
m_iSize = iSize;
}
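// Aggregate() above compresses the buffer down to iBins by repeatedly merging
// the adjacent bucket pair with the smallest centroid distance (a streaming
// histogram reduction in the spirit of Ben-Haim & Tom-Tov); the priority
// queue holds possibly-stale deltas that get rejected by re-checking against
// the live list. A compact standalone sketch of the same reduction, trading
// the queue+list machinery for a plain O(n*k) scan:
#if 0
#include <algorithm>
#include <cstddef>
#include <vector>

struct Bucket_t { double m_fCentroid; int m_iCount; };

void Compress ( std::vector<Bucket_t> & dBuckets, size_t tBins )
{
	std::sort ( dBuckets.begin(), dBuckets.end(),
		[]( const Bucket_t & tA, const Bucket_t & tB ){ return tA.m_fCentroid<tB.m_fCentroid; } );

	while ( dBuckets.size()>tBins && dBuckets.size()>1 )
	{
		// find the closest pair of adjacent buckets
		size_t iBest = 0;
		for ( size_t i = 1; i+1<dBuckets.size(); i++ )
			if ( dBuckets[i+1].m_fCentroid - dBuckets[i].m_fCentroid
				< dBuckets[iBest+1].m_fCentroid - dBuckets[iBest].m_fCentroid )
				iBest = i;

		// merge it into a count-weighted centroid
		Bucket_t & tLeft = dBuckets[iBest];
		const Bucket_t & tRight = dBuckets[iBest+1];
		int iTotal = tLeft.m_iCount + tRight.m_iCount;
		tLeft.m_fCentroid = ( tLeft.m_fCentroid*tLeft.m_iCount + tRight.m_fCentroid*tRight.m_iCount ) / iTotal;
		tLeft.m_iCount = iTotal;
		dBuckets.erase ( dBuckets.begin()+iBest+1 );
	}
}
#endif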
template<typename T>
int HistogramStreamed_T<T>::GetBucket ( T tValue, bool bCounterLess ) const
{
if ( tValue<m_tMinValue )
return 0;
if ( tValue>m_tMaxValue )
return ( m_iSize - 1 );
// m_dBuckets is larger than m_iSize
int iBestBucket = FindSpan ( m_dBuckets.Slice ( 0, m_iSize ), tValue );
assert ( iBestBucket>=0 && iBestBucket<m_iSize );
if ( iBestBucket<m_iSize-1 )
{
const HSBucket_T<T> & tBucketL = m_dBuckets[iBestBucket];
const HSBucket_T<T> & tBucketR = m_dBuckets[iBestBucket+1];
T tDistL = tValue - tBucketL.m_tCentroid;
T tDist = tBucketR.m_tCentroid - tBucketL.m_tCentroid;
		if ( tDistL>( tDist/3 ) && tDistL<( tDist*2/3 ) ) // center case - select the bucket with the smaller / larger counter
{
if ( bCounterLess )
iBestBucket = ( tBucketL.m_iCount<tBucketR.m_iCount ? iBestBucket : iBestBucket+1 );
else
iBestBucket = ( tBucketL.m_iCount>tBucketR.m_iCount ? iBestBucket : iBestBucket+1 );
} else // select closest bucket
iBestBucket = ( tDistL<( tDist-tDistL ) ? iBestBucket : iBestBucket+1 );
}
return iBestBucket;
}
template<typename T>
int HistogramStreamed_T<T>::LerpCounter ( int iBucket, T tVal ) const
{
const HSBucket_T<T> & tBucketL = m_dBuckets[iBucket];
const HSBucket_T<T> & tBucketR = m_dBuckets[iBucket+1];
assert ( tBucketL.m_tCentroid<=tVal && tVal<=tBucketR.m_tCentroid );
T tDistL = tVal - tBucketL.m_tCentroid;
T tDist = tBucketR.m_tCentroid - tBucketL.m_tCentroid;
double fLerp = (double)tDistL / (double)tDist;
	fLerp = std::clamp ( fLerp, 0.0, 1.0 ); // clamp instead of assert as it runs out of bounds at INT64_MAX
return int ( fLerp * tBucketR.m_iCount + ( 1.0f - fLerp ) * tBucketL.m_iCount );
}
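// LerpCounter() above interpolates linearly between the two bounding
// centroids: with t = (v-cL)/(cR-cL) clamped to [0,1], the estimated counter
// is (1-t)*nL + t*nR.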
template<typename T>
HSBucketTrait_t HistogramStreamed_T<T>::GetBucket ( T tValue ) const
{
if ( tValue<m_tMinValue )
return HSBucketTrait_t ( 0, m_dBuckets[0].m_iCount );
if ( tValue>m_tMaxValue )
return HSBucketTrait_t ( m_iSize - 1, m_dBuckets[m_iSize-1].m_iCount );
int iItem = FindSpan ( m_dBuckets.Slice ( 0, m_iSize ), tValue );
int iCount = 0;
assert ( iItem>=0 && iItem<m_iSize );
if ( iItem==m_iSize-1 )
iCount = m_dBuckets[iItem].m_iCount;
else
iCount = LerpCounter ( iItem, tValue );
return HSBucketTrait_t ( iItem, iCount );
}
template<typename T>
bool HistogramStreamed_T<T>::EstimateRsetSize ( const CSphFilterSettings & tFilter, HistogramRset_t & tEstimate ) const
{
if ( !m_iSize )
return false;
tEstimate.m_iTotal = GetNumValues();
CommonFilterSettings_t tFS = tFilter;
ESphAttr eAttrType;
switch ( TYPE )
{
case HISTOGRAM_STREAMED_UINT32: eAttrType = SPH_ATTR_INTEGER; break;
case HISTOGRAM_STREAMED_FLOAT: eAttrType = SPH_ATTR_FLOAT; break;
default: eAttrType = SPH_ATTR_BIGINT; break;
}
FixupFilterSettings ( tFilter, eAttrType, tFS );
switch ( tFS.m_eType )
{
case SPH_FILTER_VALUES:
assert ( TYPE==HISTOGRAM_STREAMED_UINT32 || TYPE==HISTOGRAM_STREAMED_INT64 );
tEstimate = EstimateValues ( tFS.m_bExclude, tFilter.GetValues() );
return true;
case SPH_FILTER_RANGE:
assert ( TYPE==HISTOGRAM_STREAMED_UINT32 || TYPE==HISTOGRAM_STREAMED_INT64 );
tEstimate = EstimateRangeFilter ( tFS.m_bExclude, tFS.m_bHasEqualMin, tFS.m_bHasEqualMax, tFS.m_bOpenLeft, tFS.m_bOpenRight, (T)tFS.m_iMinValue, (T)tFS.m_iMaxValue );
return true;
case SPH_FILTER_FLOATRANGE:
assert ( TYPE==HISTOGRAM_STREAMED_FLOAT );
tEstimate = EstimateRangeFilter ( tFS.m_bExclude, tFS.m_bHasEqualMin, tFS.m_bHasEqualMax, tFS.m_bOpenLeft, tFS.m_bOpenRight, (T)tFS.m_fMinValue, (T)tFS.m_fMaxValue );
return true;
case SPH_FILTER_STRING:
case SPH_FILTER_STRING_LIST:
{
int iItemsCount = Max ( tFilter.m_dStrings.GetLength(), tFilter.GetNumValues() );
CSphFixedVector<SphAttr_t> dHashes ( iItemsCount );
for ( int i=0; i<iItemsCount; i++ )
{
const CSphString & sStr = tFilter.m_dStrings[i];
int iLen = sStr.Length();
dHashes[i] = iLen ? LibcCIHash_fn::Hash ( (const BYTE*)sStr.cstr(), iLen ) : 0;
}
// clean up duplicates and string collisions
dHashes.Sort();
int iHashesCount = sphUniq ( dHashes.Begin(), dHashes.GetLength() );
tEstimate = EstimateValues ( tFS.m_bExclude, dHashes.Slice ( 0, iHashesCount ) );
}
return true;
default:
break;
}
return false;
}
template<typename T>
HistogramRset_t HistogramStreamed_T<T>::EstimateValues ( bool bExclude, const VecTraits_T<SphAttr_t> & dValues ) const
{
HistogramRset_t tRes;
int iPrevBucket = INT_MIN;
for ( auto tValue : dValues )
{
HSBucketTrait_t tItem = GetBucket ( tValue );
if ( tItem.m_iBucket!=iPrevBucket )
{
tRes.m_iTotal += tItem.m_iCount;
iPrevBucket = tItem.m_iBucket;
}
}
if ( bExclude )
tRes.m_iTotal = m_uValues - tRes.m_iTotal;
return tRes;
}
static HistogramRset_t operator+ ( const HistogramRset_t & tA, HistogramRset_t & tB )
{
return { tA.m_iTotal + tB.m_iTotal };
}
template<typename T>
HistogramRset_t HistogramStreamed_T<T>::EstimateRangeFilter ( bool bExclude, bool bHasEqualMin, bool bHasEqualMax, bool bOpenLeft, bool bOpenRight, T tMinValue, T tMaxValue ) const
{
HistogramRset_t tEstimate;
if ( !bExclude )
return EstimateInterval ( tMinValue, tMaxValue, bHasEqualMin, bHasEqualMax, bOpenLeft, bOpenRight );
assert ( !bOpenLeft || !bOpenRight );
if ( bOpenRight )
tEstimate = EstimateInterval ( (T)0, tMinValue, false, !bHasEqualMin, true, false );
else if ( bOpenLeft )
tEstimate = EstimateInterval ( tMaxValue, (T)0, !bHasEqualMax, false, false, true );
else
{
tEstimate = EstimateInterval ( (T)0, tMinValue, false, !bHasEqualMin, true, false );
tEstimate = EstimateInterval ( tMaxValue, (T)0, !bHasEqualMax, false, false, true ) + tEstimate;
}
return tEstimate;
}
template<typename T>
T HistogramStreamed_T<T>::Saturate ( T tVal ) const
{
if ( tVal<m_tMinValue )
return m_tMinValue;
if ( tVal>m_tMaxValue )
return m_tMaxValue;
return tVal;
}
template<typename T>
HistogramRset_t HistogramStreamed_T<T>::EstimateInterval ( T tMin, T tMax, bool bHasEqualMin, bool bHasEqualMax, bool bOpenLeft, bool bOpenRight ) const
{
if ( TYPE==HISTOGRAM_STREAMED_UINT32 || TYPE==HISTOGRAM_STREAMED_INT64 )
{
if ( !bOpenLeft && !bHasEqualMin && tMin < MAX_BY_TYPE )
tMin++;
if ( !bOpenRight && !bHasEqualMax && tMax > MIN_BY_TYPE )
tMax--;
}
tMin = Saturate ( tMin );
tMax = Saturate ( tMax );
HistogramRset_t tEstimate;
T tRangeMin = MAX_BY_TYPE;
T tRangeMax = MIN_BY_TYPE;
// open left means to process all buckets from start
int iStartBucket = 0;
if ( !bOpenLeft )
iStartBucket = FindSpan ( m_dBuckets.Slice ( 0, m_iSize ), tMin );
int iChecked = 0;
for ( int iBucket=iStartBucket+1; iBucket<m_iSize; iBucket++ )
{
const HSBucket_T<T> & tBucket = m_dBuckets[iBucket];
// open right means to process all buckets till end
if ( !bOpenRight && tBucket.m_tCentroid>tMax )
break;
tEstimate.m_iTotal += tBucket.m_iCount;
iChecked++;
tRangeMin = Min ( tRangeMin, tBucket.m_tCentroid );
tRangeMax = Max ( tRangeMax, tBucket.m_tCentroid );
}
if ( !iChecked ) // interval inside single bucket
{
tEstimate.m_iTotal = m_dBuckets[iStartBucket].m_iCount;
tRangeMin = m_dBuckets[iStartBucket].m_tCentroid;
tRangeMax = m_dBuckets[iStartBucket].m_tCentroid;
if ( iStartBucket+1<m_iSize )
{
DWORD uMinCount = 0;
DWORD uMaxCount = 0;
if ( m_dBuckets[iStartBucket].m_tCentroid<tMin && tMin<m_dBuckets[iStartBucket+1].m_tCentroid )
uMinCount = LerpCounter ( iStartBucket, tMin );
if ( m_dBuckets[iStartBucket].m_tCentroid<tMax && tMax<m_dBuckets[iStartBucket+1].m_tCentroid )
uMaxCount = LerpCounter ( iStartBucket, tMax );
if ( uMinCount || uMaxCount )
tEstimate.m_iTotal = Max ( uMinCount, uMaxCount );
}
}
else // count head bucket interval
tEstimate.m_iTotal += bOpenLeft ? m_dBuckets[iStartBucket].m_iCount : LerpCounter ( iStartBucket, tMin );
return tEstimate;
}
template<> const HistogramType_e HistogramStreamed_T<DWORD>::TYPE = HISTOGRAM_STREAMED_UINT32;
template<> const DWORD HistogramStreamed_T<DWORD>::MIN_BY_TYPE = 0;
template<> const DWORD HistogramStreamed_T<DWORD>::MAX_BY_TYPE = UINT32_MAX;
template<> const HistogramType_e HistogramStreamed_T<int64_t>::TYPE = HISTOGRAM_STREAMED_INT64;
template<> const int64_t HistogramStreamed_T<int64_t>::MIN_BY_TYPE = 0;
template<> const int64_t HistogramStreamed_T<int64_t>::MAX_BY_TYPE = INT64_MAX;
template<> const HistogramType_e HistogramStreamed_T<float>::TYPE = HISTOGRAM_STREAMED_FLOAT;
template<> const float HistogramStreamed_T<float>::MIN_BY_TYPE = FLT_MIN;
template<> const float HistogramStreamed_T<float>::MAX_BY_TYPE = FLT_MAX;
template<> void HistogramStreamed_T<DWORD>::DumpValue ( const HSBucket_T < DWORD > & tVal, StringBuilder_c & tBuf ) const
{
tBuf.Sprintf ( "%u,%d", tVal.m_tCentroid, tVal.m_iCount );
}
template<> void HistogramStreamed_T<int64_t>::DumpValue ( const HSBucket_T < int64_t > & tVal, StringBuilder_c & tBuf ) const
{
tBuf.Sprintf ( INT64_FMT ",%d", tVal.m_tCentroid, tVal.m_iCount );
}
template<> void HistogramStreamed_T<float>::DumpValue ( const HSBucket_T < float > & tVal, StringBuilder_c & tBuf ) const
{
tBuf.Sprintf ( "%.3f,%d", tVal.m_tCentroid, tVal.m_iCount );
}
//////////////////////////////////////////////////////////////////////////
static std::unique_ptr<Histogram_i> CreateHistogram ( const CSphString & sAttr, HistogramType_e eType, int iSize );
HistogramContainer_c::~HistogramContainer_c()
{
Reset();
}
void HistogramContainer_c::Reset()
{
for ( auto& tHistogram : m_dHistogramHash )
SafeDelete ( tHistogram.second );
m_dHistogramHash.Reset();
m_dHistograms.Resize(0);
}
bool HistogramContainer_c::Save ( const CSphString & sFile, CSphString & sError )
{
CSphWriter tWriter;
if ( !tWriter.OpenFile ( sFile, sError ) )
return false;
tWriter.PutDword ( m_dHistogramHash.GetLength() );
for ( auto& tHistogram : m_dHistogramHash )
{
Histogram_i * pHistogram = tHistogram.second;
assert ( pHistogram );
pHistogram->Finalize();
tWriter.PutString ( pHistogram->GetAttrName() );
tWriter.PutDword ( pHistogram->GetType() );
if ( !pHistogram->Save ( tWriter ) )
{
sError.SetSprintf ( "error saving histograms to %s", sFile.cstr() );
return false;
}
}
tWriter.CloseFile();
if ( tWriter.IsError() )
{
sError.SetSprintf ( "error saving histograms to %s", sFile.cstr() );
return false;
}
return true;
}
bool HistogramContainer_c::Load ( const CSphString & sFile, CSphString & sError )
{
Reset();
CSphAutoreader tReader;
if ( !tReader.Open ( sFile, sError ) )
return false;
int nHistograms = tReader.GetDword();
for ( int i = 0; i < nHistograms; i++ )
{
CSphString sAttr = tReader.GetString();
HistogramType_e eType = (HistogramType_e)tReader.GetDword();
std::unique_ptr<Histogram_i> pHistogram = CreateHistogram ( sAttr, eType, 0 );
if ( !pHistogram )
{
sError.SetSprintf ( "error loading histograms from %s", sFile.cstr() );
return false;
}
if ( !pHistogram->Load ( tReader, sError ) )
return false;
if ( !Add ( std::move ( pHistogram ) ) )
{
sError.SetSprintf ( "duplicate histograms found in %s", sFile.cstr() );
return false;
}
}
if ( tReader.GetErrorFlag() )
{
sError = tReader.GetErrorMessage();
return false;
}
return true;
}
bool HistogramContainer_c::Add ( std::unique_ptr<Histogram_i> pHistogram )
{
assert ( pHistogram );
if ( !m_dHistogramHash.Add ( pHistogram.get(), pHistogram->GetAttrName() ) )
return false;
m_dHistograms.Add ( pHistogram.release() );
return true;
}
void HistogramContainer_c::Remove ( const CSphString & sAttr )
{
std::unique_ptr<Histogram_i> pHistogram { Get(sAttr) };
if ( !pHistogram )
return;
m_dHistograms.RemoveValue ( pHistogram.get() );
m_dHistogramHash.Delete ( sAttr );
}
Histogram_i * HistogramContainer_c::Get ( const CSphString & sAttr ) const
{
Histogram_i ** ppHistogram = m_dHistogramHash(sAttr);
return ppHistogram ? *ppHistogram : nullptr;
}
//////////////////////////////////////////////////////////////////////////
static bool CanCreateHistogram ( const CSphString & sAttrName, ESphAttr eAttrType )
{
if ( sphIsInternalAttr ( sAttrName ) )
return false;
return ( eAttrType==SPH_ATTR_INTEGER || eAttrType==SPH_ATTR_BIGINT || eAttrType==SPH_ATTR_BOOL || eAttrType==SPH_ATTR_FLOAT || eAttrType==SPH_ATTR_TIMESTAMP || eAttrType==SPH_ATTR_UINT32SET || eAttrType==SPH_ATTR_INT64SET || eAttrType==SPH_ATTR_STRING );
}
static std::unique_ptr<Histogram_i> CreateHistogram ( const CSphString & sAttr, HistogramType_e eType, int iSize )
{
const int MAX_BUCKETS = 1024;
if ( !iSize )
iSize = MAX_BUCKETS;
switch ( eType )
{
case HISTOGRAM_STREAMED_UINT32: return std::make_unique<HistogramStreamed_T<DWORD>> ( sAttr, iSize );
case HISTOGRAM_STREAMED_INT64: return std::make_unique<HistogramStreamed_T<int64_t>> ( sAttr, iSize );
case HISTOGRAM_STREAMED_FLOAT: return std::make_unique<HistogramStreamed_T<float>> ( sAttr, iSize );
default: return nullptr;
}
}
std::unique_ptr<Histogram_i> CreateHistogram ( const CSphString & sAttr, ESphAttr eAttrType, int iSize )
{
if ( !CanCreateHistogram ( sAttr, eAttrType ) )
return nullptr;
switch ( eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
case SPH_ATTR_UINT32SET:
return CreateHistogram ( sAttr, HISTOGRAM_STREAMED_UINT32, iSize );
case SPH_ATTR_STRING:
return CreateHistogram ( sAttr, HISTOGRAM_STREAMED_INT64, iSize );
case SPH_ATTR_INT64SET:
case SPH_ATTR_BIGINT:
return CreateHistogram ( sAttr, HISTOGRAM_STREAMED_INT64, iSize );
case SPH_ATTR_FLOAT: return CreateHistogram ( sAttr, HISTOGRAM_STREAMED_FLOAT, iSize );
default: return nullptr;
}
}
void BuildCreateHistograms ( HistogramContainer_c & tHistograms, CSphVector<PlainOrColumnar_t> & dAttrsForHistogram, const ISphSchema & tSchema )
{
int iColumnar = 0;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
std::unique_ptr<Histogram_i> pHistogram = CreateHistogram ( tAttr.m_sName, tAttr.m_eAttrType );
if ( pHistogram )
{
tHistograms.Add ( std::move ( pHistogram ) );
dAttrsForHistogram.Add ( PlainOrColumnar_t ( tAttr, iColumnar ) );
}
if ( tAttr.IsColumnar() )
iColumnar++;
}
}
void BuildStoreHistograms ( RowID_t tRowID, const CSphRowitem * pRow, const BYTE * pPool, CSphVector<ScopedTypedIterator_t> & dIterators, const CSphVector<PlainOrColumnar_t> & dAttrs, HistogramContainer_c & tHistograms )
{
for ( int iAttr=0; iAttr<dAttrs.GetLength(); iAttr++ )
{
const PlainOrColumnar_t & tSrc = dAttrs[iAttr];
switch ( tSrc.m_eType )
{
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
{
const BYTE * pSrc = nullptr;
int iBytes = tSrc.Get ( tRowID, pRow, pPool, dIterators, pSrc );
int iValues = iBytes / ( tSrc.m_eType==SPH_ATTR_UINT32SET ? sizeof(DWORD) : sizeof(int64_t) );
for ( int iVal=0; iVal<iValues; iVal++ )
tHistograms.Insert ( iAttr, pSrc[iVal] );
}
break;
case SPH_ATTR_STRING:
{
const BYTE * pSrc = nullptr;
int iBytes = tSrc.Get ( tRowID, pRow, pPool, dIterators, pSrc );
SphAttr_t uHash = sphCRC32 ( pSrc, iBytes );
tHistograms.Insert ( iAttr, uHash );
}
break;
default:
tHistograms.Insert ( iAttr, tSrc.Get ( tRowID, pRow, dIterators ) );
break;
}
}
}
| 27,042 | C++ | .cpp | 787 | 31.954257 | 255 | 0.701853 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,826 | sphinxexpr.cpp | manticoresoftware_manticoresearch/src/sphinxexpr.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxexpr.h"
#include "sphinxplugin.h"
#include "sphinxutils.h"
#include "attribute.h"
#include "sphinxint.h"
#include "sphinxjson.h"
#include "docstore.h"
#include "coroutine.h"
#include "stackmock.h"
#include "client_task_info.h"
#include "exprtraits.h"
#include "columnarexpr.h"
#include "conversion.h"
#include "geodist.h"
#include "knnmisc.h"
#include <time.h>
#include <math.h>
#include "uni_algo/case.h"
#include "datetime.h"
#include "exprdatetime.h"
#include "exprdocstore.h"
#if WITH_RE2
#include <re2/re2.h>
#endif
#ifndef M_LOG2E
#define M_LOG2E 1.44269504088896340736
#endif
#ifndef M_LOG10E
#define M_LOG10E 0.434294481903251827651
#endif
namespace { // static
fnGetUserVar& refUservars()
{
static fnGetUserVar pUservarsHook = nullptr;
return pUservarsHook;
}
}
void SetUserVarsHook ( fnGetUserVar fnHook )
{
refUservars () = fnHook;
}
bool UservarsAvailable ()
{
return refUservars ()!=nullptr;
}
UservarIntSet_c Uservars ( const CSphString & sUservar )
{
assert ( UservarsAvailable () );
return refUservars () ( sUservar );
}
inline Str_t CurrentUser()
{
if ( session::GetVip () )
return { "VIP", 3 };
return { "Usual", 5 };
}
CSphString& sphinxexpr::MySQLVersion()
{
static CSphString sSQLVersion;
return sSQLVersion;
}
inline int ConnID ()
{
return session::GetConnID ();
}
//////////////////////////////////////////////////////////////////////////
// EVALUATION ENGINE
//////////////////////////////////////////////////////////////////////////
struct ExprLocatorTraits_t
{
CSphAttrLocator m_tLocator;
CSphString m_sAttr;
CSphString m_sColumnarAttr;
ExprLocatorTraits_t ( const CSphAttrLocator & tLocator, const CSphString & sAttr ) : m_tLocator ( tLocator ), m_sAttr ( sAttr ) {}
virtual ~ExprLocatorTraits_t() = default;
virtual void HandleCommand ( ESphExprCommand eCmd, void * pArg )
{
switch ( eCmd )
{
case SPH_EXPR_GET_DEPENDENT_COLS:
if ( !m_sAttr.IsEmpty() )
static_cast<StrVec_t*>(pArg)->Add(m_sAttr);
break;
case SPH_EXPR_SET_COLUMNAR_COL:
m_sColumnarAttr = *static_cast < CSphString* >(pArg);
break;
case SPH_EXPR_GET_COLUMNAR_COL:
*static_cast < CSphString* >(pArg) = m_sColumnarAttr;
break;
default:
break;
}
}
virtual void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
sphFixupLocator ( m_tLocator, pOldSchema, pNewSchema );
}
protected:
ExprLocatorTraits_t ( const ExprLocatorTraits_t& ) = default;
};
const BYTE * ISphExpr::StringEvalPacked ( const CSphMatch & tMatch ) const
{
const BYTE * pStr = nullptr;
int iStrLen = StringEval ( tMatch, &pStr );
auto pRes = sphPackPtrAttr ( { pStr, iStrLen } );
FreeDataPtr ( *this, pStr );
return pRes;
}
template<class BaseExpr_T>
class Expr_WithLocator_T : public BaseExpr_T, public ExprLocatorTraits_t
{
public:
Expr_WithLocator_T ( const CSphAttrLocator & tLocator, const CSphString & sAttr )
: ExprLocatorTraits_t ( tLocator, sAttr )
{}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
sphFixupLocator ( m_tLocator, pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
HandleCommand ( eCmd, pArg );
}
protected:
Expr_WithLocator_T ( const Expr_WithLocator_T & rhs )
: ExprLocatorTraits_t (rhs) {}
};
class Expr_WithLocator_c : public Expr_WithLocator_T<ISphExpr>
{
public:
Expr_WithLocator_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr )
: Expr_WithLocator_T ( tLocator, sAttr )
{}
};
// has string expression traits, but has no locator
class Expr_StrNoLocator_c : public ISphStringExpr
{
public:
void FixupLocator ( const ISphSchema * /*pOldSchema*/, const ISphSchema * /*pNewSchema*/ ) override {}
};
// the ISphExpr base class does not convert strings to floats, so do it here
float ISphStringExpr::Eval ( const CSphMatch & tMatch ) const
{
float fVal = 0.f;
const char * pBuf = nullptr;
int iLen = StringEval ( tMatch, (const BYTE **) &pBuf );
if ( iLen && pBuf )
{
const char * pMax = sphFindLastNumeric ( pBuf, iLen );
if ( pBuf<pMax )
{
fVal = (float) strtod ( pBuf, nullptr );
}
else
{
CSphString sBuf;
sBuf.SetBinary ( pBuf, iLen );
fVal = (float) strtod ( sBuf.cstr(), nullptr );
}
}
FreeDataPtr ( *this, pBuf );
return fVal;
}
// the ISphExpr base class does not convert strings to ints, so do it here
int ISphStringExpr::IntEval ( const CSphMatch & tMatch ) const
{
int iVal = 0;
const char * pBuf = nullptr;
int iLen = StringEval ( tMatch, (const BYTE **) &pBuf );
if ( iLen && pBuf )
{
const char * pMax = sphFindLastNumeric ( pBuf, iLen );
if ( pBuf<pMax )
{
iVal = strtol ( pBuf, NULL, 10 );
}
else
{
CSphString sBuf;
sBuf.SetBinary ( pBuf, iLen );
iVal = strtol ( sBuf.cstr(), NULL, 10 );
}
}
FreeDataPtr ( *this, pBuf );
return iVal;
}
// the ISphExpr base class does not convert strings to int64, so do it here
int64_t ISphStringExpr::Int64Eval ( const CSphMatch & tMatch ) const
{
int64_t iVal = 0;
const char * pBuf = nullptr;
int iLen = StringEval ( tMatch, (const BYTE **) &pBuf );
if ( iLen && pBuf )
{
const char * pMax = sphFindLastNumeric ( pBuf, iLen );
if ( pBuf<pMax )
{
iVal = strtoll ( pBuf, nullptr, 10 );
}
else
{
CSphString sBuf;
sBuf.SetBinary ( pBuf, iLen );
iVal = strtoll ( sBuf.cstr(), nullptr, 10 );
}
}
FreeDataPtr ( *this, pBuf );
return iVal;
}
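// The three numeric fallbacks above share one guard: strtod/strtol(l) need a
// terminated buffer, but the blob behind StringEval() is not necessarily
// NUL-terminated. When a non-numeric byte provably follows the numeric prefix
// inside the buffer, parsing in place is safe; otherwise the bytes are copied
// into a NUL-terminated CSphString first. A standalone sketch of that guard
// (a simplified stand-in for sphFindLastNumeric()):
#if 0
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <string>

double ParseBlob ( const char * pBuf, int iLen )
{
	// in-place parsing is safe only if strtod is guaranteed to stop
	// before running off the end of the buffer
	for ( int i = 0; i < iLen; i++ )
		if ( !isdigit ( (unsigned char)pBuf[i] ) && !strchr ( "+-.eE", pBuf[i] ) )
			return strtod ( pBuf, nullptr );

	// every byte may belong to the number: copy and NUL-terminate first
	return strtod ( std::string ( pBuf, iLen ).c_str(), nullptr );
}
#endif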
//////////////////////////////////////////////////////////////////////////
class Expr_GetInt_c : public Expr_WithLocator_c
{
public:
Expr_GetInt_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr ) : Expr_WithLocator_c ( tLocator, sAttr ) {}
float Eval ( const CSphMatch & tMatch ) const final { return (float) tMatch.GetAttr ( m_tLocator ); } // FIXME! OPTIMIZE!!! we can go the short route here
int IntEval ( const CSphMatch & tMatch ) const final { return (int)tMatch.GetAttr ( m_tLocator ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)tMatch.GetAttr ( m_tLocator ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetInt_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetInt_c ( *this );
}
private:
Expr_GetInt_c ( const Expr_GetInt_c& ) = default;
};
class Expr_GetBits_c : public Expr_WithLocator_c
{
public:
Expr_GetBits_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr ) : Expr_WithLocator_c ( tLocator, sAttr ) {}
float Eval ( const CSphMatch & tMatch ) const final { return (float) tMatch.GetAttr ( m_tLocator ); }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)tMatch.GetAttr ( m_tLocator ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)tMatch.GetAttr ( m_tLocator ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetBits_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetBits_c ( *this );
}
private:
Expr_GetBits_c ( const Expr_GetBits_c& ) = default;
};
class Expr_GetSint_c : public Expr_WithLocator_c
{
public:
Expr_GetSint_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr ) : Expr_WithLocator_c ( tLocator, sAttr ) {}
float Eval ( const CSphMatch & tMatch ) const final { return (float)(int)tMatch.GetAttr ( m_tLocator ); }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)tMatch.GetAttr ( m_tLocator ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int)tMatch.GetAttr ( m_tLocator ); } // the (int) cast is intentional: sign-extend the 32-bit signed attr before widening
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetSint_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetSint_c ( *this );
}
private:
Expr_GetSint_c ( const Expr_GetSint_c& ) = default;
};
class Expr_GetFloat_c : public Expr_WithLocator_c
{
public:
Expr_GetFloat_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr ) : Expr_WithLocator_c ( tLocator, sAttr ) {}
float Eval ( const CSphMatch & tMatch ) const final { return tMatch.GetAttrFloat ( m_tLocator ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetFloat_c");
return CALC_DEP_HASHES();
}
ISphExpr* Clone() const final
{
return new Expr_GetFloat_c ( *this );
}
private:
Expr_GetFloat_c ( const Expr_GetFloat_c& ) = default;
};
class Expr_GetString_c : public Expr_WithLocator_T<ISphStringExpr>
{
public:
Expr_GetString_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr )
: Expr_WithLocator_T ( tLocator, sAttr )
{}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_WithLocator_T::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
if ( m_tLocator.IsBlobAttr() && !m_pBlobPool )
return 0;
auto dStr = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
*ppStr = dStr.first; /// FIXME! maybe all StringEval should return ByteBlob_t?
return dStr.second;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetString_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetString_c ( *this );
}
private:
const BYTE * m_pBlobPool {nullptr};
Expr_GetString_c ( const Expr_GetString_c& rhs ) : Expr_WithLocator_T ( rhs ) {}
};
class Expr_GetMva_c : public Expr_WithLocator_c
{
public:
Expr_GetMva_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr )
: Expr_WithLocator_c ( tLocator, sAttr )
{}
float Eval ( const CSphMatch & ) const final { assert ( 0 ); return 0; }
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_WithLocator_c::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE *)pArg;
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
return (int64_t)sphPackPtrAttr ( tMatch.FetchAttrData ( m_tLocator, m_pBlobPool ) );
}
ByteBlob_t MvaEval ( const CSphMatch & tMatch ) const final
{
return tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetMva_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetMva_c ( *this );
}
private:
const BYTE * m_pBlobPool {nullptr};
Expr_GetMva_c ( const Expr_GetMva_c& rhs ) : Expr_WithLocator_c ( rhs ) {}
};
class Expr_GetFactorsAttr_c : public Expr_WithLocator_c
{
public:
Expr_GetFactorsAttr_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr )
: Expr_WithLocator_c ( tLocator, sAttr )
{}
float Eval ( const CSphMatch & ) const final { assert ( 0 ); return 0; }
const BYTE * FactorEval ( const CSphMatch & tMatch ) const final
{
auto * pPacked = (const BYTE *)tMatch.GetAttr ( m_tLocator );
return sphUnpackPtrAttr ( pPacked ).first;
}
const BYTE * FactorEvalPacked ( const CSphMatch & tMatch ) const final
{
return (const BYTE *)tMatch.GetAttr ( m_tLocator );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetFactorsAttr_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetFactorsAttr_c ( *this );
}
private:
Expr_GetFactorsAttr_c ( const Expr_GetFactorsAttr_c& ) = default;
};
class Expr_GetConst_c : public Expr_NoLocator_c
{
public:
explicit Expr_GetConst_c ( float fValue )
: m_fValue ( fValue )
{}
float Eval ( const CSphMatch & ) const final { return m_fValue; }
int IntEval ( const CSphMatch & ) const final { return (int)m_fValue; }
int64_t Int64Eval ( const CSphMatch & ) const final { return (int64_t)m_fValue; }
bool IsConst () const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetConst_c");
CALC_POD_HASH(m_fValue);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetConst_c ( *this );
}
private:
float m_fValue {0.0f};
Expr_GetConst_c ( const Expr_GetConst_c& rhs ) : m_fValue ( rhs.m_fValue ) {}
};
class Expr_GetIntConst_c : public Expr_NoLocator_c
{
public:
explicit Expr_GetIntConst_c ( int iValue )
: m_iValue ( iValue )
{}
float Eval ( const CSphMatch & ) const final { return (float) m_iValue; } // no assert() here cause generic float Eval() needs to work even on int-evaluator tree
int IntEval ( const CSphMatch & ) const final { return m_iValue; }
int64_t Int64Eval ( const CSphMatch & ) const final { return m_iValue; }
bool IsConst () const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetIntConst_c");
CALC_POD_HASH(m_iValue);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetIntConst_c ( *this );
}
private:
int m_iValue {0};
Expr_GetIntConst_c ( const Expr_GetIntConst_c& rhs ) : m_iValue ( rhs.m_iValue ) {}
};
class Expr_GetInt64Const_c : public Expr_NoLocator_c
{
public:
explicit Expr_GetInt64Const_c ( int64_t iValue )
: m_iValue ( iValue )
{}
float Eval ( const CSphMatch & ) const final { return (float) m_iValue; } // no assert() here cause generic float Eval() needs to work even on int-evaluator tree
int IntEval ( const CSphMatch & ) const final { assert ( 0 ); return (int)m_iValue; }
int64_t Int64Eval ( const CSphMatch & ) const final { return m_iValue; }
bool IsConst () const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetInt64Const_c");
CALC_POD_HASH(m_iValue);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetInt64Const_c ( *this );
}
private:
int64_t m_iValue {0};
Expr_GetInt64Const_c ( const Expr_GetInt64Const_c& rhs ) : m_iValue ( rhs.m_iValue ) {}
};
class Expr_GetStrConst_c : public Expr_StrNoLocator_c
{
public:
Expr_GetStrConst_c ( const char * sVal, int iLen, bool bUnescape )
{
if ( iLen>0 )
{
if ( bUnescape )
std::tie ( m_sVal, m_iLen ) = SqlUnescapeN ( sVal, iLen );
else {
m_sVal.SetBinary ( sVal, iLen );
m_iLen = iLen;
}
} else
m_iLen = m_sVal.Length();
}
Expr_GetStrConst_c ( Str_t sVal, bool bUnescape )
: Expr_GetStrConst_c ( (const char*) sVal.first, (int) sVal.second, bUnescape )
{}
int StringEval ( const CSphMatch &, const BYTE ** ppStr ) const final
{
*ppStr = (const BYTE*) m_sVal.cstr();
return m_iLen;
}
bool IsConst () const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetStrConst_c");
CALC_STR_HASH(m_sVal, m_iLen);
return CALC_DEP_HASHES();
}
const CSphString & GetStr() { return m_sVal; }
ISphExpr * Clone () const final
{
return new Expr_GetStrConst_c ( *this );
}
private:
CSphString m_sVal;
int m_iLen {0};
Expr_GetStrConst_c ( const Expr_GetStrConst_c& rhs ) : m_sVal ( rhs.m_sVal ), m_iLen ( rhs.m_iLen ) {}
};
class Expr_GetZonespanlist_c : public Expr_StrNoLocator_c
{
public:
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
assert ( ppStr );
if ( !m_pData || !m_pData->GetLength() )
{
*ppStr = nullptr;
return 0;
}
m_sBuilder.Clear();
const CSphVector<int> & dSpans = *m_pData;
int iStart = tMatch.m_iTag + 1; // spans[tag] contains the length, so the 1st data index is tag+1
int iEnd = iStart + dSpans [ tMatch.m_iTag ]; // [start,end) now covers all data indexes
for ( int i=iStart; i<iEnd; i+=2 )
m_sBuilder.Appendf ( " %d:%d", 1+dSpans[i], 1+dSpans[i+1] ); // convert our 0-based span numbers to human 1-based ones
auto iRes = m_sBuilder.GetLength ();
*ppStr = m_sBuilder.Leak();
return iRes;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd==SPH_EXPR_SET_EXTRA_DATA )
static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_DATA_ZONESPANS, (void**)const_cast<IntVec_t**>(&m_pData) );
}
bool IsDataPtrAttr() const final
{
return true;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true; // disable caching for now, might add code to process if necessary
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_GetZonespanlist_c;
}
private:
const CSphVector<int> * m_pData {nullptr};
mutable StringBuilder_c m_sBuilder;
};
class Expr_GetRankFactors_c : public Expr_StrNoLocator_c
{
public:
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
assert ( ppStr );
if ( !m_pFactors )
{
*ppStr = nullptr;
return 0;
}
CSphString * sVal = (*m_pFactors) ( tMatch.m_tRowID );
if ( !sVal )
{
*ppStr = nullptr;
return 0;
}
int iLen = sVal->Length();
*ppStr = (const BYTE*)sVal->Leak();
m_pFactors->Delete ( tMatch.m_tRowID );
return iLen;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd==SPH_EXPR_SET_EXTRA_DATA )
static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_DATA_RANKFACTORS, (void**)&m_pFactors );
}
bool IsDataPtrAttr() const final
{
return true;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true; // disable caching for now, might add code to process if necessary
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_GetRankFactors_c;
}
private:
/// hash type MUST BE IN SYNC with RankerState_Export_fn in sphinxsearch.cpp
CSphOrderedHash < CSphString, RowID_t, IdentityHash_fn, 256 > * m_pFactors {nullptr};
};
class Expr_GetPackedFactors_c : public Expr_StrNoLocator_c
{
public:
const BYTE * FactorEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pData = nullptr;
int iDataLen = FetchHashEntry ( tMatch, pData );
if ( !pData )
return nullptr;
auto * pResult = new BYTE[iDataLen];
memcpy ( pResult, pData, iDataLen );
return pResult;
}
const BYTE * FactorEvalPacked ( const CSphMatch & tMatch ) const final
{
const BYTE * pData = nullptr;
int iDataLen = FetchHashEntry ( tMatch, pData );
if ( !pData )
return nullptr;
return sphPackPtrAttr( {pData, iDataLen} );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd==SPH_EXPR_SET_EXTRA_DATA )
static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_DATA_PACKEDFACTORS, (void**)&m_pHash );
}
bool IsDataPtrAttr() const final
{
return true;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true; // disable caching for now, might add code to process if necessary
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_GetPackedFactors_c;
}
private:
SphFactorHash_t * m_pHash {nullptr};
int FetchHashEntry ( const CSphMatch & tMatch, const BYTE * & pData ) const
{
pData = nullptr;
if ( !m_pHash || !m_pHash->GetLength() )
return 0;
DWORD uKey = FactorPoolHash ( RowTagged_t ( tMatch ), m_pHash->GetLength() );
SphFactorHashEntry_t * pEntry = (*m_pHash)[ uKey ];
assert ( pEntry );
while ( pEntry && pEntry->m_tRow!=RowTagged_t ( tMatch ) )
pEntry = pEntry->m_pNext;
if ( !pEntry )
return 0;
pData = pEntry->m_pData;
return int((BYTE *)pEntry - pEntry->m_pData);
}
};
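// A minimal sketch (illustrative node type, not the engine's) of the
// chained-hash lookup performed by FetchHashEntry() above: hash the row into
// a bucket, then walk the collision chain until the row matches.
struct SketchFactorNode_t
{
int m_iRow;
SketchFactorNode_t * m_pNext;
};
static const SketchFactorNode_t * SketchHashLookup ( SketchFactorNode_t * const * ppBuckets, int iBuckets, int iRow )
{
const SketchFactorNode_t * pEntry = ppBuckets [ (unsigned)iRow % iBuckets ]; // pick the bucket
while ( pEntry && pEntry->m_iRow!=iRow ) // walk the collision chain
pEntry = pEntry->m_pNext;
return pEntry; // nullptr means the row has no stored factors
}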
class Expr_BM25F_c : public Expr_NoLocator_c
{
public:
Expr_BM25F_c ( float k1, float b, CSphVector<CSphNamedVariant> * pFieldWeights )
: m_fK1 (k1)
, m_fB ( b )
{
if ( pFieldWeights )
{
m_iCount = pFieldWeights->GetLength ();
m_pFieldWeights = pFieldWeights->LeakData ();
}
}
float Eval ( const CSphMatch & tMatch ) const final
{
if ( !m_pHash || !m_pHash->GetLength() )
return 0.0f;
DWORD uKey = FactorPoolHash ( RowTagged_t ( tMatch ), m_pHash->GetLength() );
SphFactorHashEntry_t * pEntry = (*m_pHash)[ uKey ];
assert ( pEntry );
while ( pEntry && pEntry->m_tRow!=RowTagged_t ( tMatch ) )
pEntry = pEntry->m_pNext;
if ( !pEntry )
return 0.0f;
SPH_UDF_FACTORS tUnpacked;
sphinx_factors_init ( &tUnpacked );
#ifndef NDEBUG
Verify ( sphinx_factors_unpack ( (const unsigned int*)pEntry->m_pData, &tUnpacked )==0 );
#else
sphinx_factors_unpack ( (const unsigned int*)pEntry->m_pData, &tUnpacked ); // fix MSVC Release warning
#endif
// compute document length
// OPTIMIZE? could precompute and store total dl in attrs, but at a storage cost
// OPTIMIZE? could at least share between multiple BM25F instances, if there are many
float dl = 0;
CSphAttrLocator tLoc = m_tRankerState.m_tFieldLensLoc;
if ( tLoc.m_iBitOffset>=0 )
{
for ( int i=0; i<m_tRankerState.m_iFields; i++ )
{
dl += tMatch.GetAttr ( tLoc ) * m_dWeights[i];
tLoc.m_iBitOffset += 32;
}
}
// compute (the current instance of) BM25F
float fRes = 0.0f;
for ( int iWord=0; iWord<m_tRankerState.m_iMaxQpos; iWord++ )
{
if ( !tUnpacked.term[iWord].keyword_mask )
continue;
// compute weighted TF
float tf = 0.0f;
for ( int i=0; i<m_tRankerState.m_iFields; i++ )
{
tf += tUnpacked.field_tf[ iWord + 1 + i * ( 1 + m_tRankerState.m_iMaxQpos ) ] * m_dWeights[i];
}
float idf = tUnpacked.term[iWord].idf; // FIXME? zeroed out for dupes!
fRes += tf / ( tf + m_fK1 * ( 1.0f - m_fB + m_fB * dl / m_fWeightedAvgDocLen ) ) * idf;
}
sphinx_factors_deinit ( &tUnpacked );
return fRes + 0.5f; // map to [0..1] range
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd!=SPH_EXPR_SET_EXTRA_DATA )
return;
bool bGotHash = static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_DATA_PACKEDFACTORS, (void**)&m_pHash );
if ( !bGotHash )
return;
bool bGotState = static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_DATA_RANKER_STATE, (void**)&m_tRankerState );
if ( !bGotState )
return;
// bind weights
m_dWeights.Resize ( m_tRankerState.m_iFields );
m_dWeights.Fill ( 1 );
if ( m_iCount )
{
for ( int i=0; i<m_iCount; ++i )
{
// FIXME? report errors if field was not found?
CSphString & sField = m_pFieldWeights[i].m_sKey;
int iField = m_tRankerState.m_pSchema->GetFieldIndex ( sField.cstr() );
if ( iField>=0 )
m_dWeights[iField] = m_pFieldWeights[i].m_iValue;
}
}
// compute weighted avgdl
m_fWeightedAvgDocLen = 1.0f;
if ( m_tRankerState.m_pFieldLens )
{
m_fWeightedAvgDocLen = 0.0f;
ARRAY_FOREACH ( i, m_dWeights )
m_fWeightedAvgDocLen += m_tRankerState.m_pFieldLens[i] * m_dWeights[i];
}
m_fWeightedAvgDocLen /= m_tRankerState.m_iTotalDocuments;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true; // disable caching for now, might add code to process if necessary
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_BM25F_c ( *this );
}
private:
SphExtraDataRankerState_t m_tRankerState;
float m_fK1 {0.0f};
float m_fB {0.0f};
float m_fWeightedAvgDocLen {0.0f};
CSphVector<int> m_dWeights; ///< per field weights
SphFactorHash_t * m_pHash {nullptr};
SharedPtrArr_t<CSphNamedVariant> m_pFieldWeights;
int64_t m_iCount = 0;
Expr_BM25F_c ( const Expr_BM25F_c& rhs )
: m_fK1 ( rhs.m_fK1 )
, m_fB ( rhs.m_fB )
, m_pFieldWeights ( rhs.m_pFieldWeights )
, m_iCount ( rhs.m_iCount )
{}
};
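// A minimal sketch (assumes per-field weights are already folded into fTF and
// fDL, exactly as the loops above do) of the per-term BM25F contribution
// summed in Expr_BM25F_c::Eval():
static inline float SketchBM25FTerm ( float fTF, float fIDF, float fDL, float fAvgDL, float fK1, float fB )
{
// classic saturation curve: tf / ( tf + k1 * ( 1 - b + b * dl/avgdl ) ) * idf
return fTF / ( fTF + fK1 * ( 1.0f - fB + fB * fDL / fAvgDL ) ) * fIDF;
}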
class Expr_GetWeight_c : public Expr_NoLocator_c
{
public:
float Eval ( const CSphMatch & tMatch ) const final { return (float)tMatch.m_iWeight; }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)tMatch.m_iWeight; }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)tMatch.m_iWeight; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetWeight_c");
return CALC_DEP_HASHES();
}
ISphExpr* Clone() const final
{
return new Expr_GetWeight_c;
}
};
//////////////////////////////////////////////////////////////////////////
class Expr_Arglist_c : public ISphExpr
{
friend void MoveToArgList ( ISphExpr * pLeft, VecRefPtrs_t<ISphExpr*> & dArgs );
public:
Expr_Arglist_c ( ISphExpr * pLeft, ISphExpr * pRight )
{
AddArgs ( pLeft );
AddArgs ( pRight );
}
bool IsArglist () const final
{
return true;
}
ISphExpr * GetArg ( int i ) const final
{
if ( i>=m_dArgs.GetLength() )
return nullptr;
return m_dArgs[i];
}
int GetNumArgs() const final
{
return m_dArgs.GetLength();
}
float Eval ( const CSphMatch & ) const final
{
assert ( 0 && "internal error: Eval() must not be explicitly called on arglist" );
return 0.0f;
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
for ( auto i : m_dArgs )
i->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
for ( auto i : m_dArgs )
i->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "internal error: GetHash() must not be explicitly called on arglist" );
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_Arglist_c ( *this );
}
private:
VecRefPtrs_t<ISphExpr*> m_dArgs;
void AddArgs ( ISphExpr * pExpr )
{
// not an arglist? just add it
if ( !pExpr->IsArglist () )
{
m_dArgs.Add ( pExpr );
SafeAddRef ( pExpr );
return;
}
// arglist? take ownership of its args, and dismiss it
auto * pArgs = ( Expr_Arglist_c * ) pExpr;
m_dArgs.Append ( pArgs->m_dArgs );
pArgs->m_dArgs.Reset ();
}
Expr_Arglist_c ( const Expr_Arglist_c& rhs )
{
m_dArgs.Resize ( rhs.m_dArgs.GetLength () );
ARRAY_FOREACH ( i, m_dArgs )
m_dArgs[i] = SafeClone (rhs.m_dArgs[i]);
}
};
//////////////////////////////////////////////////////////////////////////
// a very deep expression needs a dedicated stack for destruction
class Expr_ProxyFat_c final : public Expr_Unary_c
{
public:
explicit Expr_ProxyFat_c ( ISphExpr * pExpr )
: Expr_Unary_c ( "Expr_ProxyFat_c", pExpr )
{}
float Eval ( const CSphMatch & tMatch ) const final { return m_pFirst->Eval(tMatch); }
int IntEval ( const CSphMatch & tMatch ) const final { return m_pFirst->IntEval(tMatch); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return m_pFirst->Int64Eval(tMatch); }
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final { return m_pFirst->StringEval(tMatch,ppStr); }
const BYTE * StringEvalPacked ( const CSphMatch & tMatch ) const final { return m_pFirst->StringEvalPacked(tMatch); }
ByteBlob_t MvaEval ( const CSphMatch & tMatch) const final { return m_pFirst->MvaEval(tMatch); }
const BYTE * FactorEval ( const CSphMatch & tMatch) const final { return m_pFirst->FactorEval(tMatch); }
const BYTE * FactorEvalPacked ( const CSphMatch & tMatch) const final { return m_pFirst->FactorEvalPacked(tMatch); }
bool IsArglist () const final { return m_pFirst->IsArglist(); }
bool IsColumnar ( bool * pStored ) const final { return m_pFirst->IsColumnar(pStored); }
bool IsDataPtrAttr () const final { return m_pFirst->IsDataPtrAttr(); }
ISphExpr * GetArg ( int i ) const final { return m_pFirst->GetArg(i); }
int GetNumArgs() const final { return m_pFirst->GetNumArgs(); }
bool IsConst () const final { return m_pFirst->IsConst(); }
bool IsJson ( bool & bConverted ) const final { return m_pFirst->IsJson( bConverted); }
ISphExpr * Clone () const final { return new Expr_ProxyFat_c ( *this ); }
protected:
Expr_ProxyFat_c ( const Expr_ProxyFat_c & ) = default;
~Expr_ProxyFat_c() final
{
if ( !m_pFirst || !m_pFirst->IsLast() )
return;
auto iStackNeeded = Threads::GetStackUsed ();
auto iCurStackSize = Threads::MyStackSize ();
int iNeedInAdvance = session::GetDesiredStack ();
if ( iNeedInAdvance<=0 )
iStackNeeded = iCurStackSize * 2; // the mere fact we're here means the expression is deep, since ProxyFat is created on demand
else
iStackNeeded += iNeedInAdvance * 2; // fixme! *2 is not a precise estimate!
if ( iStackNeeded<=iCurStackSize )
return;
// special deep-expression delete: take the subexpression and release it from a dedicated coro with an increased stack
auto pExpr = m_pFirst.Leak();
assert ( pExpr && pExpr->IsLast () );
Threads::Coro::Continue ( (int) iStackNeeded, [pExpr] { pExpr->Release(); } );
}
};
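// A minimal sketch (illustrative type, not the engine's) of the problem
// Expr_ProxyFat_c solves: releasing the last reference to a deep expression
// chain unwinds recursively, one stack frame per node, so a deep enough tree
// can overflow the default thread stack unless destruction is moved to a
// coroutine with a larger stack.
struct SketchDeepNode_t
{
SketchDeepNode_t * m_pChild = nullptr;
~SketchDeepNode_t() { delete m_pChild; } // recursion depth equals chain length
};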
class Expr_StrLength_c : public Expr_Unary_c
{
public:
explicit Expr_StrLength_c ( ISphExpr * pArg )
: Expr_Unary_c ( "Expr_StrLength_c", pArg )
{}
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pStr = nullptr;
int iLen = m_pFirst->StringEval ( tMatch, &pStr );
FreeDataPtr ( *m_pFirst, pStr );
return iLen;
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
ISphExpr * Clone () const final
{
return new Expr_StrLength_c ( *this );
}
private:
Expr_StrLength_c ( const Expr_StrLength_c& ) = default;
};
class Expr_Crc32_c : public Expr_Unary_c
{
public:
explicit Expr_Crc32_c ( ISphExpr * pFirst )
: Expr_Unary_c ( "Expr_Crc32_c", pFirst )
{}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pStr;
int iLen = m_pFirst->StringEval ( tMatch, &pStr );
DWORD uCrc = sphCRC32 ( pStr, iLen );
FreeDataPtr ( *m_pFirst, pStr );
return uCrc;
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)(DWORD)IntEval ( tMatch ); }
ISphExpr * Clone () const final
{
return new Expr_Crc32_c ( *this );
}
private:
Expr_Crc32_c ( const Expr_Crc32_c& ) = default;
};
static inline int Fibonacci ( int i )
{
if ( i<0 )
return 0;
int f0 = 0;
int f1 = 1;
for ( int j=0; j+1<i; j+=2 )
{
f0 += f1; // f_j
f1 += f0; // f_{j+1}
}
return ( i & 1 ) ? f1 : f0;
}
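// A tiny standalone check (not used by the engine) of the invariant in
// Fibonacci() above: after each two-step iteration f0==F(j) and f1==F(j+1),
// so the parity of i selects the correct accumulator at exit.
static bool SketchCheckFibonacci()
{
const int dExpected[] = { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34 };
for ( int i=0; i<10; i++ )
if ( Fibonacci(i)!=dExpected[i] )
return false;
return true;
}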
class Expr_Fibonacci_c : public Expr_Unary_c
{
public:
explicit Expr_Fibonacci_c ( ISphExpr * pFirst )
: Expr_Unary_c ( "Expr_Fibonacci_c", pFirst )
{}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
int IntEval ( const CSphMatch & tMatch ) const final { return Fibonacci ( m_pFirst->IntEval ( tMatch ) ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return IntEval ( tMatch ); }
ISphExpr * Clone () const final
{
return new Expr_Fibonacci_c ( *this );
}
private:
Expr_Fibonacci_c ( const Expr_Fibonacci_c& ) = default;
};
class Expr_Concat_c : public Expr_NoLocator_c
{
public:
explicit Expr_Concat_c ( const CSphVector<ISphExpr *> & dArgs, const CSphVector<bool> & dConstStr )
{
// pre-eval const strings
CSphVector<const BYTE *> dEvaluated ( dArgs.GetLength() );
m_dArgs.Resize ( dArgs.GetLength() );
CSphMatch tMatch; // fake match
ARRAY_FOREACH ( i, m_dArgs )
{
if ( dConstStr[i] )
dArgs[i]->StringEval ( tMatch, &dEvaluated[i] );
else
dEvaluated[i] = nullptr;
}
// pre-concat const strings
int iArg = 0;
while ( iArg < m_dArgs.GetLength() )
{
StringOrExpr_t & tArg = m_dArgs[iArg];
if ( dEvaluated[iArg] )
{
for ( ; iArg < dEvaluated.GetLength() && dEvaluated[iArg]; iArg++ )
{
auto iLen = (int) strlen ( (const char *)dEvaluated[iArg] );
memcpy ( tArg.m_dBuffer.AddN (iLen), dEvaluated[iArg], iLen );
}
}
else
tArg.m_pExpr = dArgs[iArg++];
}
// remove gaps (if any)
ARRAY_FOREACH ( i, m_dArgs )
if ( !m_dArgs[i].m_dBuffer.GetLength() && !m_dArgs[i].m_pExpr )
m_dArgs.Remove(i--);
for ( auto & i : m_dArgs )
SafeAddRef ( i.m_pExpr );
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
CSphVector<BYTE> dResult;
int iResultLen = EvalMatch ( tMatch, dResult );
*ppStr = dResult.LeakData();
return iResultLen;
}
const BYTE * StringEvalPacked ( const CSphMatch & tMatch ) const final
{
// this is done to avoid reallocation while re-packing the result of StringEval call
TightPackedVec_T<BYTE> dResult;
int iResultLen = EvalMatch ( tMatch, dResult );
sphPackPtrAttrInPlace ( dResult, iResultLen );
return dResult.LeakData();
}
float Eval ( const CSphMatch & ) const final { assert ( 0 ); return 0; }
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
for ( auto & i : m_dArgs )
if ( i.m_pExpr )
i.m_pExpr->Command ( eCmd, pArg );
}
bool IsDataPtrAttr() const final
{
return true;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_Concat_c");
for ( const auto & i : m_dArgs )
if ( i.m_pExpr ) uHash = i.m_pExpr->GetHash ( tSorterSchema, uHash, bDisable );
return CALC_DEP_HASHES();
}
ISphExpr* Clone() const final
{
return new Expr_Concat_c ( *this );
}
protected:
struct StringOrExpr_t
{
CSphVector<BYTE> m_dBuffer;
CSphRefcountedPtr<ISphExpr> m_pExpr {nullptr};
};
CSphVector<StringOrExpr_t> m_dArgs;
template<class POLICY, class LIMIT, class STORE>
int EvalMatch ( const CSphMatch & tMatch, sph::Vector_T<BYTE, POLICY, LIMIT, STORE> & dResult ) const
{
for ( auto & i : m_dArgs )
{
const BYTE * pStr = nullptr;
int iLen;
if ( i.m_pExpr )
iLen = i.m_pExpr->StringEval ( tMatch, &pStr );
else
{
iLen = i.m_dBuffer.GetLength();
pStr = i.m_dBuffer.Begin();
}
if ( pStr )
dResult.Append ( const_cast<BYTE*>(pStr), iLen );
FreeDataPtr ( i.m_pExpr, pStr );
}
dResult.Add('\0');
return dResult.GetLength()-1;
}
private:
Expr_Concat_c ( const Expr_Concat_c& rhs )
{
m_dArgs.Resize ( rhs.m_dArgs.GetLength ());
ARRAY_FOREACH ( i, m_dArgs )
{
m_dArgs[i].m_dBuffer = rhs.m_dArgs[i].m_dBuffer;
m_dArgs[i].m_pExpr = SafeClone ( rhs.m_dArgs[i].m_pExpr );
}
}
};
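// A minimal sketch (std::string/std::vector stand in for the engine's types)
// of the constant-folding pass in the Expr_Concat_c constructor above: runs
// of adjacent constant arguments are concatenated once at construction time,
// so per-match evaluation only touches the non-constant arguments.
#include <string>
#include <vector>
static std::vector<std::string> SketchFoldConcatArgs ( const std::vector<std::string> & dArgs, const std::vector<bool> & dConst )
{
std::vector<std::string> dFolded;
for ( size_t i=0; i<dArgs.size(); )
{
if ( dConst[i] )
{
std::string sRun; // pre-concatenate the whole constant run
for ( ; i<dArgs.size() && dConst[i]; i++ )
sRun += dArgs[i];
dFolded.push_back ( sRun );
} else
dFolded.push_back ( dArgs[i++] ); // left for per-match evaluation
}
return dFolded;
}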
class Expr_ToString_c : public Expr_Unary_c
{
public:
Expr_ToString_c ( ISphExpr * pArg, ESphAttr eArg )
: Expr_Unary_c ( "Expr_ToString_c", pArg )
, m_eArg ( eArg )
{}
float Eval ( const CSphMatch & ) const final
{
assert ( 0 );
return 0.0f;
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
m_sBuilder.Clear();
uint64_t uPacked = 0;
ESphJsonType eJson = JSON_NULL;
uint64_t uOff = 0;
int iLen = 0;
switch ( m_eArg )
{
case SPH_ATTR_INTEGER: m_sBuilder.Appendf ( "%u", m_pFirst->IntEval ( tMatch ) ); break;
case SPH_ATTR_BIGINT: m_sBuilder.Appendf ( INT64_FMT, m_pFirst->Int64Eval ( tMatch ) ); break;
case SPH_ATTR_FLOAT: m_sBuilder.Appendf ( "%f", m_pFirst->Eval ( tMatch ) ); break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET_PTR:
{
auto dMva = m_pFirst->MvaEval ( tMatch );
sphMVA2Str ( dMva, m_eArg==SPH_ATTR_INT64SET || m_eArg==SPH_ATTR_INT64SET_PTR, m_sBuilder );
}
break;
case SPH_ATTR_FLOAT_VECTOR:
case SPH_ATTR_FLOAT_VECTOR_PTR:
sphFloatVec2Str ( m_pFirst->MvaEval(tMatch), m_sBuilder );
break;
case SPH_ATTR_STRING:
{
CSphVector<BYTE> dTmp;
iLen = m_pFirst->StringEval ( tMatch, ppStr );
dTmp.Resize(iLen+1);
if ( ppStr )
{
memcpy ( dTmp.Begin(), *ppStr, iLen );
dTmp[iLen] = '\0';
}
else
dTmp[0] = '\0';
*ppStr = dTmp.LeakData();
return iLen;
}
case SPH_ATTR_STRINGPTR:
return m_pFirst->StringEval ( tMatch, ppStr );
case SPH_ATTR_JSON_FIELD:
uPacked = m_pFirst->Int64Eval ( tMatch );
eJson = sphJsonUnpackType ( uPacked );
uOff = sphJsonUnpackOffset ( uPacked );
if ( !uOff || eJson==JSON_NULL )
{
*ppStr = nullptr;
iLen = 0;
} else
{
JsonEscapedBuilder dTmp;
sphJsonFieldFormat ( dTmp, m_pBlobPool+uOff, eJson, false );
iLen = dTmp.GetLength();
*ppStr = dTmp.Leak();
}
return iLen;
default:
assert ( 0 && "unhandled arg type in TO_STRING()" );
break;
}
if ( !m_sBuilder.GetLength() )
{
*ppStr = nullptr;
return 0;
}
auto iRes = m_sBuilder.GetLength ();
*ppStr = m_sBuilder.Leak();
return iRes;
}
bool IsDataPtrAttr() const final
{
if ( m_eArg==SPH_ATTR_STRINGPTR )
return m_pFirst->IsDataPtrAttr();
return true;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
m_pFirst->Command ( eCmd, pArg );
}
ISphExpr* Clone() const final
{
return new Expr_ToString_c ( *this );
}
private:
ESphAttr m_eArg;
const BYTE * m_pBlobPool {nullptr};
mutable StringBuilder_c m_sBuilder;
private:
Expr_ToString_c ( const Expr_ToString_c& rhs )
: Expr_Unary_c ( rhs )
, m_eArg ( rhs.m_eArg )
{}
};
//////////////////////////////////////////////////////////////////////////
/// generic JSON value evaluation
/// can handle arbitrary stacks of jsoncol.key1.arr2[indexexpr3].key4[keynameexpr5]
/// m_dArgs holds the expressions that return actual accessors (either keynames or indexes)
/// m_dRetTypes holds their respective types
class Expr_JsonField_c : public Expr_WithLocator_c
{
public:
/// takes over the expressions
Expr_JsonField_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr, CSphVector<ISphExpr*> & dArgs, CSphVector<ESphAttr> & dRetTypes )
: Expr_WithLocator_c ( tLocator, sAttr )
{
assert ( dArgs.GetLength()==dRetTypes.GetLength() );
m_dArgs.SwapData ( dArgs );
m_dRetTypes.SwapData ( dRetTypes );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
Expr_WithLocator_c::Command ( eCmd, pArg );
switch ( eCmd )
{
case SPH_EXPR_SET_BLOB_POOL:
m_pBlobPool = (const BYTE*)pArg;
break;
case SPH_EXPR_GET_DEPENDENT_COLS:
if ( !m_sAttr.IsEmpty() )
static_cast<StrVec_t*>(pArg)->Add(m_sAttr);
break;
case SPH_EXPR_FORMAT_AS_TEXT:
if ( !m_sAttr.IsEmpty() && m_dArgs.all_of( []( auto & pExpr ){ return pExpr->IsConst(); } ) )
{
auto pSchemaWithName = static_cast<std::pair<const ISphSchema*,CSphString>*>(pArg);
const CSphColumnInfo * pAttr = pSchemaWithName->first->GetAttr ( m_sAttr.cstr() );
assert(pAttr);
if ( m_dArgs.IsEmpty() )
pSchemaWithName->second = pAttr->m_sName;
else
{
CSphString sAllFields;
ARRAY_FOREACH ( i, m_dArgs )
{
CSphMatch tStub;
CSphString sArg;
switch ( m_dRetTypes[i] )
{
case SPH_ATTR_INTEGER:
sArg.SetSprintf ( "[%d]", m_dArgs[i]->IntEval(tStub) );
break;
case SPH_ATTR_BIGINT:
sArg.SetSprintf ( "[" INT64_FMT "]", m_dArgs[i]->Int64Eval(tStub) );
break;
case SPH_ATTR_STRING:
{
const BYTE * pStr;
int iLen = m_dArgs[i]->StringEval ( tStub, &pStr );
sArg.SetSprintf ( "['%s']", CSphString ( (const char*)pStr, iLen ).cstr() );
}
break;
default:
break;
}
if ( sAllFields.IsEmpty() )
sAllFields = sArg;
else
sAllFields.SetSprintf ( "%s%s", sAllFields.cstr(), sArg.cstr() );
}
pSchemaWithName->second.SetSprintf ( "%s%s", pAttr->m_sName.cstr(), sAllFields.cstr() );
}
}
break;
default:
break;
}
for ( auto & pExpr : m_dArgs )
if ( pExpr )
pExpr->Command ( eCmd, pArg );
}
float Eval ( const CSphMatch & ) const final
{
assert ( 0 && "one just does not simply evaluate a JSON as float" );
return 0;
}
int64_t DoEval ( ESphJsonType eJson, const BYTE * pVal, const CSphMatch & tMatch ) const
{
int iLen;
const BYTE * pStr;
ARRAY_FOREACH ( i, m_dRetTypes )
{
switch ( m_dRetTypes[i] )
{
case SPH_ATTR_INTEGER: eJson = sphJsonFindByIndex ( eJson, &pVal, m_dArgs[i]->IntEval ( tMatch ) ); break;
case SPH_ATTR_BIGINT: eJson = sphJsonFindByIndex ( eJson, &pVal, (int)m_dArgs[i]->Int64Eval ( tMatch ) ); break;
case SPH_ATTR_FLOAT: eJson = sphJsonFindByIndex ( eJson, &pVal, (int)m_dArgs[i]->Eval ( tMatch ) ); break;
case SPH_ATTR_STRING:
// if this assert ever fails, that's OK: just remove it and handle the
// possible leak instead, e.g.
// if ( m_dArgs[i]->IsDataPtrAttr() ) SafeDeleteArray ( pStr );
assert ( !m_dArgs[i]->IsDataPtrAttr() );
iLen = m_dArgs[i]->StringEval ( tMatch, &pStr );
eJson = sphJsonFindByKey ( eJson, &pVal, (const void *)pStr, iLen, sphJsonKeyMask ( (const char *)pStr, iLen ) );
break;
case SPH_ATTR_JSON_FIELD: // handle cases like "json.a [ json.b ]"
{
uint64_t uPacked = m_dArgs[i]->Int64Eval ( tMatch );
ESphJsonType eType = sphJsonUnpackType ( uPacked );
const BYTE * p = m_pBlobPool + sphJsonUnpackOffset ( uPacked );
switch ( eType )
{
case JSON_INT32: eJson = sphJsonFindByIndex ( eJson, &pVal, sphJsonLoadInt ( &p ) ); break;
case JSON_INT64: eJson = sphJsonFindByIndex ( eJson, &pVal, (int)sphJsonLoadBigint ( &p ) ); break;
case JSON_DOUBLE: eJson = sphJsonFindByIndex ( eJson, &pVal, (int)sphQW2D ( sphJsonLoadBigint ( &p ) ) ); break;
case JSON_STRING:
iLen = sphJsonUnpackInt ( &p );
eJson = sphJsonFindByKey ( eJson, &pVal, (const void *)p, iLen, sphJsonKeyMask ( (const char *)p, iLen ) );
break;
default:
return 0;
}
break;
}
default:
return 0;
}
if ( eJson==JSON_EOF )
return 0;
}
// pack the actual attribute type together with the offset to the data
return sphJsonPackTypeOffset ( eJson, pVal-m_pBlobPool );
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const override
{
if ( !m_pBlobPool )
return 0;
if ( m_tLocator.m_bDynamic )
{
// extends precalculated (aliased) field
uint64_t uPacked = tMatch.GetAttr ( m_tLocator );
if ( !uPacked )
return 0;
ESphJsonType eType = sphJsonUnpackType ( uPacked );
const BYTE * pVal = m_pBlobPool + sphJsonUnpackOffset ( uPacked );
return DoEval ( eType, pVal, tMatch );
}
int iLengthBytes = 0;
const BYTE * pVal = sphGetBlobAttr ( tMatch, m_tLocator, m_pBlobPool, iLengthBytes );
if ( !pVal )
return 0;
ESphJsonType eJson = sphJsonFindFirst ( &pVal );
return DoEval ( eJson, pVal, tMatch );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonField_c");
CALC_POD_HASHES(m_dRetTypes);
CALC_CHILD_HASHES(m_dArgs);
return CALC_DEP_HASHES();
}
bool IsJson ( bool & bConverted ) const final
{
bConverted = false;
return true;
}
ISphExpr* Clone() const override
{
return new Expr_JsonField_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
private:
VecRefPtrs_t<ISphExpr*> m_dArgs;
CSphVector<ESphAttr> m_dRetTypes;
protected:
Expr_JsonField_c ( const Expr_JsonField_c & rhs )
: Expr_WithLocator_c ( rhs )
, m_dRetTypes ( rhs.m_dRetTypes )
{
m_dArgs.Resize ( rhs.m_dArgs.GetLength ());
ARRAY_FOREACH ( i, m_dArgs )
m_dArgs[i] = SafeClone (rhs.m_dArgs[i]);
}
};
/// fastpath (instead of generic JsonField_c) for jsoncol.key access by a static key name
class Expr_JsonFastKey_c : public Expr_WithLocator_c
{
public:
/// takes over the expressions
Expr_JsonFastKey_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr, ISphExpr * pArg )
: Expr_WithLocator_c ( tLocator, sAttr )
{
auto * pKey = (Expr_GetStrConst_c*)pArg;
m_sKey = pKey->GetStr();
m_iKeyLen = m_sKey.Length();
m_uKeyBloom = sphJsonKeyMask ( m_sKey.cstr(), m_iKeyLen );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_WithLocator_c::Command ( eCmd, pArg );
switch ( eCmd )
{
case SPH_EXPR_SET_BLOB_POOL:
m_pBlobPool = (const BYTE*)pArg;
break;
case SPH_EXPR_FORMAT_AS_TEXT:
if ( !m_sAttr.IsEmpty() )
{
auto pSchemaWithName = static_cast<std::pair<const ISphSchema*,CSphString>*>(pArg);
const CSphColumnInfo * pAttr = pSchemaWithName->first->GetAttr ( m_sAttr.cstr() );
assert(pAttr);
pSchemaWithName->second.SetSprintf ( "%s['%s']", pAttr->m_sName.cstr(), m_sKey.cstr() );
}
break;
default:
break;
}
}
float Eval ( const CSphMatch & ) const final
{
assert ( 0 && "one just does not simply evaluate a JSON as float" );
return 0;
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
// get pointer to JSON blob data
const BYTE * pJson = sphGetBlobAttr ( tMatch, m_tLocator, m_pBlobPool ).first;
if ( !pJson )
return 0;
// all root objects start with a Bloom mask; quickly check it
if ( ( sphGetDword(pJson) & m_uKeyBloom )!=m_uKeyBloom )
return 0;
// OPTIMIZE? FindByKey does an extra (redundant) bloom check inside
ESphJsonType eJson = sphJsonFindByKey ( JSON_ROOT, &pJson, m_sKey.cstr(), m_iKeyLen, m_uKeyBloom );
if ( eJson==JSON_EOF )
return 0;
// pack the actual attribute type together with the offset to the data
return sphJsonPackTypeOffset ( eJson, pJson-m_pBlobPool );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonFastKey_c");
CALC_STR_HASH(m_sKey,m_iKeyLen);
return CALC_DEP_HASHES();
}
bool IsJson ( bool & bConverted ) const final
{
bConverted = false;
return true;
}
ISphExpr* Clone() const final
{
return new Expr_JsonFastKey_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
CSphString m_sKey;
int m_iKeyLen {0};
DWORD m_uKeyBloom {0};
private:
Expr_JsonFastKey_c ( const Expr_JsonFastKey_c & rhs )
: Expr_WithLocator_c ( rhs )
, m_sKey ( rhs.m_sKey )
, m_iKeyLen ( rhs.m_iKeyLen )
, m_uKeyBloom ( rhs.m_uKeyBloom )
{}
};
static ESphJsonType GetKey ( const BYTE ** ppKey, const CSphMatch & tMatch, const BYTE * pBlobPool, const CSphRefcountedPtr<ISphExpr> & pArg )
{
assert ( ppKey );
if ( !pBlobPool || !pArg )
return JSON_EOF;
uint64_t uPacked = pArg->Int64Eval ( tMatch );
*ppKey = pBlobPool + sphJsonUnpackOffset ( uPacked );
return sphJsonUnpackType ( uPacked );
}
class Expr_JsonFieldConv_c : public ISphExpr
{
public:
explicit Expr_JsonFieldConv_c ( ISphExpr * pArg )
: m_pArg { pArg }
{
SafeAddRef ( pArg );
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
if ( eJson!=JSON_STRING)
return 0;
// using sphUnpackStr() here would be wrong, because BSON stores the string length in a different format
int iLen = sphJsonUnpackInt ( &pVal );
*ppStr = pVal;
return iLen;
}
float Eval ( const CSphMatch & tMatch ) const override { return DoEval<float> ( tMatch ); }
int IntEval ( const CSphMatch & tMatch ) const override { return DoEval<int> ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const override { return DoEval<int64_t> ( tMatch ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) override
{
EXPR_CLASS_NAME("Expr_JsonFieldConv_c");
return CALC_PARENT_HASH();
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
if ( m_pArg )
m_pArg->Command ( eCmd, pArg );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
if ( m_pArg )
m_pArg->FixupLocator ( pOldSchema, pNewSchema );
}
ISphExpr* Clone() const override
{
return new Expr_JsonFieldConv_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
CSphRefcountedPtr<ISphExpr> m_pArg;
ESphJsonType GetKey ( const BYTE ** ppKey, const CSphMatch & tMatch ) const
{
return ::GetKey ( ppKey, tMatch, m_pBlobPool, m_pArg );
}
// generic evaluate
template < typename T >
T DoEval ( const CSphMatch & tMatch ) const
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
switch ( eJson )
{
case JSON_INT32: return (T)sphJsonLoadInt ( &pVal );
case JSON_INT64: return (T)sphJsonLoadBigint ( &pVal );
case JSON_DOUBLE: return (T)sphQW2D ( sphJsonLoadBigint ( &pVal ) );
case JSON_TRUE: return 1;
case JSON_STRING:
{
int iLen = sphJsonUnpackInt ( &pVal );
int64_t iVal;
double fVal;
ESphJsonType eType;
if ( sphJsonStringToNumber ( (const char*)pVal, iLen, eType, iVal, fVal ) )
return eType==JSON_DOUBLE ? (T)fVal : (T)iVal;
return 0;
}
default: return 0;
}
}
uint64_t CalcHash ( const char * szTag, const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK(szTag);
CALC_CHILD_HASH(m_pArg);
return CALC_DEP_HASHES();
}
bool IsJson ( bool & bConverted ) const final
{
bConverted = true;
return true;
}
Expr_JsonFieldConv_c ( const Expr_JsonFieldConv_c& rhs )
: m_pArg { SafeClone (rhs.m_pArg) }
{}
};
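// A standalone sketch (plain C string, illustrative only) of the JSON_STRING
// branch of DoEval() above: a string field used in numeric context is parsed
// as a number, taking the double path when it looks fractional/exponential
// and the integer path otherwise.
#include <cstdlib>
#include <cstring>
static double SketchJsonStringToNumber ( const char * sVal )
{
if ( strchr ( sVal, '.' ) || strchr ( sVal, 'e' ) || strchr ( sVal, 'E' ) )
return strtod ( sVal, nullptr ); // double path, e.g. "1.5" or "2e3"
return (double) strtoll ( sVal, nullptr, 10 ); // integer path, e.g. "42"
}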
template <typename T>
T JsonAggr ( ESphJsonType eJson, const BYTE * pVal, ESphAggrFunc eFunc, CSphString * pBuf )
{
if ( !pVal || ( eFunc!=SPH_AGGR_MIN && eFunc!=SPH_AGGR_MAX ) )
return 0;
switch ( eJson )
{
case JSON_INT32_VECTOR:
{
int iVals = sphJsonUnpackInt ( &pVal );
if ( iVals==0 )
return 0;
auto * p = (const int*) pVal;
int iRes = *p; // first value
switch ( eFunc )
{
case SPH_AGGR_MIN: while ( --iVals ) if ( *++p<iRes ) iRes = *p; break;
case SPH_AGGR_MAX: while ( --iVals ) if ( *++p>iRes ) iRes = *p; break;
default: return 0;
}
return (T)iRes;
}
case JSON_DOUBLE_VECTOR:
{
int iLen = sphJsonUnpackInt ( &pVal );
if ( !iLen || ( eFunc!=SPH_AGGR_MIN && eFunc!=SPH_AGGR_MAX ) )
return 0;
double fRes = ( eFunc==SPH_AGGR_MIN ? FLT_MAX : -FLT_MAX ); // -FLT_MAX, not FLT_MIN (the smallest positive value), so that MAX also works for all-negative vectors
const BYTE * p = pVal;
for ( int i=0; i<iLen; i++ )
{
double fStored = sphQW2D ( sphJsonLoadBigint ( &p ) );
switch ( eFunc )
{
case SPH_AGGR_MIN:
fRes = Min ( fRes, fStored );
break;
case SPH_AGGR_MAX:
fRes = Max ( fRes, fStored );
break;
default: return 0;
}
}
return (T)fRes;
}
case JSON_STRING_VECTOR:
{
if ( !pBuf )
return 0;
sphJsonUnpackInt ( &pVal ); // skip node length
int iVals = sphJsonUnpackInt ( &pVal );
if ( iVals==0 )
return 0;
// first value
int iLen = sphJsonUnpackInt ( &pVal );
auto * pRes = (const char* )pVal;
int iResLen = iLen;
while ( --iVals )
{
pVal += iLen;
iLen = sphJsonUnpackInt ( &pVal );
// binary string comparison
int iCmp = memcmp ( pRes, (const char*)pVal, iLen<iResLen ? iLen : iResLen );
if ( iCmp==0 && iLen!=iResLen )
iCmp = iResLen-iLen;
if ( ( eFunc==SPH_AGGR_MIN && iCmp>0 ) || ( eFunc==SPH_AGGR_MAX && iCmp<0 ) )
{
pRes = (const char*)pVal;
iResLen = iLen;
}
}
pBuf->SetBinary ( pRes, iResLen );
return (T)iResLen;
}
case JSON_MIXED_VECTOR:
{
sphJsonUnpackInt ( &pVal ); // skip node length
int iLen = sphJsonUnpackInt ( &pVal );
if ( !iLen || ( eFunc!=SPH_AGGR_MIN && eFunc!=SPH_AGGR_MAX ) )
return 0;
double fRes = ( eFunc==SPH_AGGR_MIN ? FLT_MAX : -FLT_MAX ); // -FLT_MAX, not FLT_MIN (the smallest positive value), so that MAX also works for all-negative vectors
for ( int i=0; i<iLen; i++ )
{
double fVal = ( eFunc==SPH_AGGR_MIN ? FLT_MAX : -FLT_MAX ); // neutral value: must affect neither min nor max
auto eType = (ESphJsonType)*pVal++;
switch (eType)
{
case JSON_INT32:
case JSON_INT64:
fVal = (double)( eType==JSON_INT32 ? sphJsonLoadInt ( &pVal ) : sphJsonLoadBigint ( &pVal ) );
break;
case JSON_DOUBLE:
fVal = sphQW2D ( sphJsonLoadBigint ( &pVal ) );
break;
default:
sphJsonSkipNode ( eType, &pVal );
break; // weird subobjects are skipped; fVal keeps its neutral value and does not affect the result
}
switch ( eFunc )
{
case SPH_AGGR_MIN:
fRes = Min ( fRes, fVal );
break;
case SPH_AGGR_MAX:
fRes = Max ( fRes, fVal );
break;
default: return 0;
}
}
return (T)fRes;
}
default: return 0;
}
}
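// A standalone sketch (plain arrays, illustrative only) of the string
// comparison rule in the JSON_STRING_VECTOR branch above: binary memcmp over
// the common prefix, with the shorter string ordered first on a tie.
#include <cstring>
static int SketchCompareBinary ( const char * pA, int iLenA, const char * pB, int iLenB )
{
int iCmp = memcmp ( pA, pB, iLenA<iLenB ? iLenA : iLenB );
if ( iCmp==0 && iLenA!=iLenB )
iCmp = iLenA-iLenB; // same prefix: the shorter string sorts first
return iCmp; // <0 means A sorts first, >0 means B sorts first
}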
class Expr_JsonFieldAggr_c : public Expr_JsonFieldConv_c
{
public:
Expr_JsonFieldAggr_c ( ISphExpr * pArg, ESphAggrFunc eFunc )
: Expr_JsonFieldConv_c ( pArg )
, m_eFunc ( eFunc )
{}
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
return JsonAggr<int> ( eJson, pVal, m_eFunc, nullptr );
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
CSphString sBuf;
*ppStr = nullptr;
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
int iLen = 0;
int iVal = 0;
float fVal = 0.0f;
switch ( eJson )
{
case JSON_INT32_VECTOR:
iVal = JsonAggr<int> ( eJson, pVal, m_eFunc, nullptr );
sBuf.SetSprintf ( "%u", iVal );
iLen = sBuf.Length();
*ppStr = (const BYTE *) sBuf.Leak();
return iLen;
case JSON_STRING_VECTOR:
JsonAggr<int> ( eJson, pVal, m_eFunc, &sBuf );
iLen = sBuf.Length();
*ppStr = (const BYTE *) sBuf.Leak();
return iLen;
case JSON_DOUBLE_VECTOR:
case JSON_MIXED_VECTOR:
fVal = JsonAggr<float> ( eJson, pVal, m_eFunc, nullptr );
sBuf.SetSprintf ( "%f", fVal );
iLen = sBuf.Length();
*ppStr = (const BYTE *) sBuf.Leak();
return iLen;
default: return 0;
}
}
float Eval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
return JsonAggr<float> ( eJson, pVal, m_eFunc, nullptr );
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
return JsonAggr<int64_t> ( eJson, pVal, m_eFunc, nullptr );
}
bool IsDataPtrAttr() const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonFieldAggr_c");
CALC_POD_HASH(m_eFunc);
return CALC_PARENT_HASH();
}
ISphExpr* Clone() const final
{
return new Expr_JsonFieldAggr_c ( *this );
}
protected:
ESphAggrFunc m_eFunc{SPH_AGGR_NONE};
Expr_JsonFieldAggr_c ( const Expr_JsonFieldAggr_c& ) = default;
};
class Expr_JsonFieldLength_c : public Expr_JsonFieldConv_c
{
public:
explicit Expr_JsonFieldLength_c ( ISphExpr * pArg )
: Expr_JsonFieldConv_c ( pArg )
{}
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
return sphJsonFieldLength ( eJson, pVal );
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonFieldLength_c");
return CALC_PARENT_HASH();
}
ISphExpr* Clone() const final
{
return new Expr_JsonFieldLength_c ( *this );
}
private:
Expr_JsonFieldLength_c ( const Expr_JsonFieldLength_c& ) = default;
};
class Expr_SubstringIndex_c : public ISphStringExpr
{
private:
CSphRefcountedPtr<ISphExpr> m_pArg;
CSphString m_sDelim;
int m_iCount = 0;
int m_iLenDelim = 0;
bool m_bFreeResPtr = false;
public:
explicit Expr_SubstringIndex_c ( ISphExpr * pArg, ISphExpr * pDelim, ISphExpr * pCount )
: m_pArg ( pArg )
, m_iCount ( 0 )
, m_bFreeResPtr ( false )
{
assert ( pArg && pDelim && pCount );
SafeAddRef ( pArg );
m_bFreeResPtr = m_pArg->IsDataPtrAttr();
const BYTE * pBuf = nullptr;
CSphMatch tTmp;
m_iLenDelim = pDelim->StringEval ( tTmp, &pBuf );
m_sDelim.SetBinary ( (const char *)pBuf, m_iLenDelim );
FreeDataPtr ( *pDelim, pBuf );
m_iCount = pCount->IntEval ( tTmp );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
if ( m_pArg )
m_pArg->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
if ( m_pArg )
m_pArg->Command ( eCmd, pArg );
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
const char* pDoc = nullptr;
int iDocLen = m_pArg->StringEval ( tMatch, (const BYTE **)&pDoc );
int iLength = 0;
*ppStr = nullptr;
if ( pDoc && iDocLen>0 && m_iLenDelim>0 && m_iCount!=0 )
{
if ( m_iCount>0 )
LeftSearch ( pDoc, iDocLen, m_iCount, false, ppStr, &iLength );
else
RightSearch ( pDoc, iDocLen, m_iCount, ppStr, &iLength );
}
FreeDataPtr ( *m_pArg, pDoc );
return iLength;
}
bool IsDataPtrAttr() const final
{
return m_bFreeResPtr;
}
bool IsConst () const final { return false; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_SubstringIndex_c");
CALC_CHILD_HASH(m_pArg);
CALC_POD_HASH(m_sDelim);
CALC_POD_HASH(m_iCount);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_SubstringIndex_c ( *this );
}
private:
int SetResultString ( const char * pDoc, int iDocLen, const BYTE ** ppResStr ) const;
int LeftSearch ( const char * pDoc, int iDocLen, int iCount, bool bGetRight, const BYTE ** ppResStr, int * pResLen ) const;
int RightSearch ( const char * pDoc, int iDocLen, int iCount, const BYTE ** ppResStr, int * pResLen ) const;
Expr_SubstringIndex_c ( const Expr_SubstringIndex_c& rhs )
: m_pArg ( SafeClone (rhs.m_pArg) )
, m_sDelim ( rhs.m_sDelim )
, m_iCount ( rhs.m_iCount )
, m_iLenDelim ( rhs.m_iLenDelim )
, m_bFreeResPtr ( rhs.m_bFreeResPtr )
{}
};
// for a static input string the function returns just a pointer and length of the substring; no buffer is allocated
// for a dynamic input string the function allocates a buffer and copies the substring into it
int Expr_SubstringIndex_c::SetResultString ( const char * pDoc, int iDocLen, const BYTE ** ppResStr ) const
{
if ( !IsDataPtrAttr() )
{
*ppResStr = (const BYTE *) pDoc;
}
else
{
CSphString sRetVal;
sRetVal.SetBinary ( pDoc, iDocLen );
*ppResStr = (const BYTE *) sRetVal.Leak();
}
return iDocLen;
}
int Expr_SubstringIndex_c::LeftSearch ( const char * pDoc, int iDocLen, int iCount, bool bGetRight, const BYTE ** ppResStr, int * pResLen ) const
{
int iTotalDelim = 0;
const char * pDelBeg = m_sDelim.cstr();
const char * pDelEnd = pDelBeg + m_iLenDelim;
const char * pStrBeg = pDoc;
const char * pStrEnd = (pStrBeg + iDocLen) - m_iLenDelim + 1;
while ( pStrBeg<pStrEnd )
{
// compare the delimiter's first char with the current char of pStr
if ( *pStrBeg==*pDelBeg )
{
// first char matched, now compare the remaining chars of the delimiter
bool bMatched = true;
const char * p1 = pStrBeg + 1;
const char * p2 = pDelBeg + 1;
while ( bMatched && p2!=pDelEnd )
{
if ( *p1!=*p2 )
bMatched = false;
p1++;
p2++;
}
// the delimiter matched: either return the left substring or keep searching for the next delimiter
if ( bMatched )
{
iTotalDelim++;
iCount--;
if ( iCount==0 )
{
if ( ppResStr && !bGetRight )
*pResLen = SetResultString ( pDoc, int ( pStrBeg - pDoc ), ppResStr );
if ( ppResStr && bGetRight )
{
pStrBeg += m_iLenDelim;
*pResLen = SetResultString ( pStrBeg, iDocLen - int (pStrBeg - pDoc), ppResStr );
}
return iTotalDelim;
}
pStrBeg += m_iLenDelim;
continue;
}
}
// the delimiter does not match at the current ptr; advance to the next char and repeat the comparison
int iCharLen = sphUTF8Len ( pStrBeg, 1 );
pStrBeg += ( iCharLen > 0 ) ? iCharLen : 1;
}
// not found, return original string
if ( iCount && ppResStr )
*pResLen = SetResultString ( pDoc, iDocLen, ppResStr );
return iTotalDelim;
}
int Expr_SubstringIndex_c::RightSearch ( const char * pDoc, int iDocLen, int iCount, const BYTE ** ppResStr, int * pResLen ) const
{
// find and count (iNumFoundDelim) all delimiter substrings
int iNumFoundDelim = LeftSearch ( pDoc, iDocLen, iDocLen+1, false, nullptr, nullptr );
// convert iCount (which is negative) to a positive, left-to-right index
iCount += iNumFoundDelim + 1;
// not enough delimiters: return the original string (and return early, to avoid setting the result twice)
if ( iCount<=0 )
{
*pResLen = SetResultString ( pDoc, iDocLen, ppResStr );
return iNumFoundDelim;
}
// find the delimiter according to iCount and return the right-hand substring
return LeftSearch ( pDoc, iDocLen, iCount, true, ppResStr, pResLen );
}
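// A minimal reference model (std::string, illustrative only; it may differ
// from the code above on exotic overlapping-delimiter inputs) of the
// SUBSTRING_INDEX() semantics implemented by LeftSearch()/RightSearch():
// a positive count keeps everything left of the count-th delimiter, a
// negative count keeps everything right of the count-th delimiter from the
// end, and the whole string is returned when there are not enough delimiters.
#include <string>
static std::string SketchSubstringIndex ( const std::string & sDoc, const std::string & sDelim, int iCount )
{
if ( sDelim.empty() )
return sDoc;
if ( !iCount )
return std::string();
if ( iCount>0 )
{
size_t uPos = 0;
for ( int i=0; i<iCount; i++ )
{
uPos = sDoc.find ( sDelim, uPos );
if ( uPos==std::string::npos )
return sDoc; // fewer delimiters than requested
uPos += sDelim.length();
}
return sDoc.substr ( 0, uPos-sDelim.length() );
}
size_t uPos = sDoc.length();
for ( int i=0; i<-iCount; i++ )
{
if ( uPos<sDelim.length() )
return sDoc; // no room for another delimiter on the left
uPos = sDoc.rfind ( sDelim, uPos-sDelim.length() );
if ( uPos==std::string::npos )
return sDoc;
}
return sDoc.substr ( uPos+sDelim.length() );
}
// e.g. SketchSubstringIndex ( "www.mysql.com", ".", 2 )=="www.mysql"
// and SketchSubstringIndex ( "www.mysql.com", ".", -2 )=="mysql.com"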
class ExprCaseBase_c : public ISphStringExpr
{
public:
explicit ExprCaseBase_c ( ISphExpr * pArg, const char * sClassName )
: m_pArg ( pArg )
, m_sClassName ( sClassName )
{
assert( pArg );
SafeAddRef( pArg );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
if ( m_pArg )
m_pArg->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
if ( m_pArg )
m_pArg->Command ( eCmd, pArg );
}
bool IsDataPtrAttr() const final
{
return true;
}
bool IsConst () const final { return false; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME(m_sClassName);
CALC_CHILD_HASH(m_pArg);
return CALC_DEP_HASHES();
}
protected:
CSphRefcountedPtr<ISphExpr> m_pArg;
ExprCaseBase_c ( const ExprCaseBase_c & rhs )
: m_pArg ( SafeClone ( rhs.m_pArg ) )
, m_sClassName ( rhs.m_sClassName )
{}
private:
const char * m_sClassName = nullptr;
};
template<bool UPPER>
class ExprCaseTrival_c : public ExprCaseBase_c
{
public:
explicit ExprCaseTrival_c ( ISphExpr * pArg )
: ExprCaseBase_c ( pArg, "ExprCaseTrival_c" )
{
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
const char * pDoc = nullptr;
int iDocLen = m_pArg->StringEval ( tMatch, (const BYTE **)&pDoc );
*ppStr = nullptr;
// copy the value into a local buffer that can be modified in place
CSphVector<BYTE> dStrBuffer;
dStrBuffer.Append ( pDoc, iDocLen );
BYTE * pStrBeg = dStrBuffer.begin();
const BYTE * pStrEnd = ( pStrBeg + iDocLen );
if ( pDoc && iDocLen>0 )
{
while( pStrBeg<pStrEnd )
{
// convert the current character to its uppercase or lowercase version if it exists
DoCase ( (char *)pStrBeg );
pStrBeg++;
}
}
*ppStr = dStrBuffer.LeakData();
FreeDataPtr ( *m_pArg, pDoc );
// return the resultant string
return iDocLen;
}
ISphExpr * Clone () const final
{
return new ExprCaseTrival_c ( *this );
}
private:
void DoCase ( char * pString ) const;
ExprCaseTrival_c ( const ExprCaseTrival_c & rhs )
: ExprCaseBase_c ( rhs )
{}
};
// For upper() function
template<>
void ExprCaseTrival_c<true>::DoCase ( char * pString ) const
{
*pString = toupper ( *pString );
}
// For lower() function
template<>
void ExprCaseTrival_c<false>::DoCase ( char * pString ) const
{
*pString = tolower ( *pString );
}
void UTF8ToLower( std::vector<char> & dBuf, std::basic_string_view<char> source )
{
return una::detail::t_map<std::vector<char>, std::basic_string_view<char>, una::detail::impl_x_case_map_utf8, una::detail::impl_case_map_loc_utf8> ( dBuf, source, una::detail::impl_case_map_mode_lowercase );
}
void UTF8ToUpper( std::vector<char> & dBuf, std::basic_string_view<char> source )
{
return una::detail::t_map<std::vector<char>, std::basic_string_view<char>, una::detail::impl_x_case_map_utf8, una::detail::impl_case_map_loc_utf8> ( dBuf, source, una::detail::impl_case_map_mode_uppercase );
}
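// A short usage sketch of the wrappers above: the mappers do full Unicode
// case mapping, not just ASCII, so the output byte length and the number of
// code points may differ from the input (e.g. U+00DF, lowercase sharp s,
// uppercases to the two letters "SS"). The output is not NUL-terminated.
static void SketchCaseMapUsage()
{
std::vector<char> dBuf;
UTF8ToUpper ( dBuf, std::basic_string_view<char> ( "stra\xc3\x9f" "e", 7 ) ); // UTF-8 bytes of the German word with a sharp s
// dBuf now holds the bytes of "STRASSE"
}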
template<bool UPPER>
class ExprCaseComplex_c : public ExprCaseBase_c
{
public:
explicit ExprCaseComplex_c ( ISphExpr * pArg )
: ExprCaseBase_c ( pArg, "ExprCaseComplex_c" )
{
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
*ppStr = nullptr;
const char * pSrcDoc = nullptr;
int iSrcDocLen = m_pArg->StringEval ( tMatch, (const BYTE **)&pSrcDoc );
std::vector<char> & dBuf = const_cast<std::vector<char> &> ( m_dBuf );
dBuf.resize ( 0 );
if ( UPPER )
UTF8ToUpper ( dBuf, std::basic_string_view<char> ( pSrcDoc, iSrcDocLen ) );
else
UTF8ToLower ( dBuf, std::basic_string_view<char> ( pSrcDoc, iSrcDocLen ) );
int iDstDocLen = dBuf.size();
CSphFixedVector<BYTE> dDst ( iDstDocLen );
memcpy ( dDst.Begin(), dBuf.data(), iDstDocLen );
*ppStr = dDst.LeakData();
// return the resultant string
return iDstDocLen;
}
ISphExpr * Clone () const final
{
return new ExprCaseComplex_c ( *this );
}
private:
ExprCaseComplex_c ( const ExprCaseComplex_c & rhs )
: ExprCaseBase_c ( rhs )
{}
std::vector<char> m_dBuf;
};
class Expr_Iterator_c : public Expr_JsonField_c
{
public:
Expr_Iterator_c ( const CSphAttrLocator & tLocator, const CSphString & sAttr, CSphVector<ISphExpr*> & dArgs, CSphVector<ESphAttr> & dRetTypes, SphAttr_t * pData )
: Expr_JsonField_c ( tLocator, sAttr, dArgs, dRetTypes )
, m_pData ( pData )
{}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
uint64_t uPacked = m_pData ? *m_pData : 0;
ESphJsonType eType = sphJsonUnpackType ( uPacked );
const BYTE * pVal = m_pBlobPool + sphJsonUnpackOffset ( uPacked );
return DoEval ( eType, pVal, tMatch );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_JsonField_c::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_ITERATOR )
m_pData = (SphAttr_t *) pArg;
}
ISphExpr* Clone() const final
{
return new Expr_Iterator_c ( *this );
// note: the cloned m_pData is most likely stale and must be re-set via SPH_EXPR_SET_ITERATOR
}
private:
SphAttr_t * m_pData {nullptr};
Expr_Iterator_c ( const Expr_Iterator_c& ) = default;
};
////////////////////////////////////////////////////////////////////
class ExprConstArgs_c : public CSphFilterSettings
{
public:
bool m_bOr = false;
bool m_bAnd = false;
void AppendFilter ( const CSphFilterSettings & tFilter );
bool IsError() const { return !m_bSet || m_bError; }
private:
bool m_bSet = false;
bool m_bError = false;
};
void ExprConstArgs_c::AppendFilter ( const CSphFilterSettings & tFilter )
{
if ( !m_bSet )
{
*(CSphFilterSettings*)this = tFilter;
m_bSet = true;
return;
}
if ( m_eType!=tFilter.m_eType || m_bExclude || tFilter.m_bExclude )
{
m_bError = true;
return;
}
switch ( m_eType )
{
case SPH_FILTER_STRING_LIST:
for ( auto & i : tFilter.m_dStrings )
m_dStrings.Add(i);
break;
case SPH_FILTER_VALUES:
for ( auto & i : tFilter.m_dValues )
m_dValues.Add(i);
break;
case SPH_FILTER_RANGE:
case SPH_FILTER_FLOATRANGE:
m_iMinValue = Max ( m_iMinValue, tFilter.m_iMinValue );
m_iMaxValue = Min ( m_iMaxValue, tFilter.m_iMaxValue );
m_fMinValue = Max ( m_fMinValue, tFilter.m_fMinValue );
m_fMaxValue = Min ( m_fMaxValue, tFilter.m_fMaxValue );
m_bHasEqualMin |= tFilter.m_bHasEqualMin;
m_bHasEqualMax |= tFilter.m_bHasEqualMax;
m_bOpenLeft &= tFilter.m_bOpenLeft;
m_bOpenRight &= tFilter.m_bOpenRight;
break;
default:
m_bError = true;
break;
}
}
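// A minimal sketch (plain ints, illustrative only) of the SPH_FILTER_RANGE
// branch above: ANDing two range filters intersects them, i.e. the merged
// lower bound is the max of the lower bounds and the merged upper bound is
// the min of the upper bounds.
struct SketchRange_t
{
int64_t m_iMin;
int64_t m_iMax;
};
static SketchRange_t SketchIntersectRanges ( const SketchRange_t & tA, const SketchRange_t & tB )
{
// e.g. ( a>3 AND a<10 ) merges into the single range (3,10)
return { tA.m_iMin>tB.m_iMin ? tA.m_iMin : tB.m_iMin, tA.m_iMax<tB.m_iMax ? tA.m_iMax : tB.m_iMax };
}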
////////////////////////////////////////////////////////////////////
class Expr_BinaryFilter_c : public Expr_Binary_c
{
using Expr_Binary_c::Expr_Binary_c;
protected:
void PopulateConstArgsStr ( ESphExprCommand eCmd, void * pArg ) const;
void PopulateConstArgsLtInt ( ESphExprCommand eCmd, void * pArg ) const;
void PopulateConstArgsGtInt ( ESphExprCommand eCmd, void * pArg ) const;
void PopulateConstArgsLteInt ( ESphExprCommand eCmd, void * pArg ) const;
void PopulateConstArgsGteInt ( ESphExprCommand eCmd, void * pArg ) const;
void PopulateConstArgsEqInt ( ESphExprCommand eCmd, void * pArg ) const;
void PopulateConstArgsNeInt ( ESphExprCommand eCmd, void * pArg ) const;
void SetFlagAnd ( ESphExprCommand eCmd, void * pArg ) const;
void SetFlagOr ( ESphExprCommand eCmd, void * pArg ) const;
private:
const ISphExpr * GetExprForConsts ( ESphExprCommand eCmd ) const;
template <typename ACTION>
void AddFilter ( ESphExprCommand eCmd, void * pArg, ACTION && fnAction ) const;
};
template <typename ACTION>
void Expr_BinaryFilter_c::AddFilter ( ESphExprCommand eCmd, void * pArg, ACTION && fnAction ) const
{
const ISphExpr * pToEval = GetExprForConsts(eCmd);
if ( !pToEval )
return;
CSphFilterSettings tFilter;
CSphMatch tStub;
fnAction ( tFilter, tStub, pToEval );
((ExprConstArgs_c*)pArg)->AppendFilter(tFilter);
}
void Expr_BinaryFilter_c::PopulateConstArgsStr ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
const BYTE * pStr = nullptr;
int iLen = pExpr->StringEval ( tMatch, &pStr );
tFilter.m_eType = SPH_FILTER_STRING_LIST;
tFilter.m_dStrings.Add ( CSphString ( (const char*)pStr, iLen ) );
} );
}
void Expr_BinaryFilter_c::PopulateConstArgsLtInt ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_iMaxValue = pExpr->Int64Eval(tMatch);
tFilter.m_bHasEqualMax = false;
tFilter.m_bOpenLeft = true;
} );
}
void Expr_BinaryFilter_c::PopulateConstArgsGtInt ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_iMinValue = pExpr->Int64Eval(tMatch);
tFilter.m_bHasEqualMin = false;
tFilter.m_bOpenRight = true;
} );
}
void Expr_BinaryFilter_c::PopulateConstArgsLteInt ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_iMaxValue = pExpr->Int64Eval(tMatch);
tFilter.m_bOpenLeft = true;
} );
}
void Expr_BinaryFilter_c::PopulateConstArgsGteInt ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_iMinValue = pExpr->Int64Eval(tMatch);
tFilter.m_bOpenRight = true;
} );
}
void Expr_BinaryFilter_c::PopulateConstArgsEqInt ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_dValues.Add ( pExpr->Int64Eval(tMatch) );
} );
}
void Expr_BinaryFilter_c::PopulateConstArgsNeInt ( ESphExprCommand eCmd, void * pArg ) const
{
AddFilter ( eCmd, pArg, []( CSphFilterSettings & tFilter, const CSphMatch & tMatch, const ISphExpr * pExpr )
{
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_bExclude = true;
tFilter.m_dValues.Add ( pExpr->Int64Eval(tMatch) );
} );
}
const ISphExpr * Expr_BinaryFilter_c::GetExprForConsts ( ESphExprCommand eCmd ) const
{
if ( eCmd!=SPH_EXPR_COLLECT_CONST_ARGS )
return nullptr;
if ( m_pFirst->IsConst() && !m_pSecond->IsConst() )
return m_pFirst;
if ( !m_pFirst->IsConst() && m_pSecond->IsConst() )
return m_pSecond;
return nullptr;
}
void Expr_BinaryFilter_c::SetFlagAnd ( ESphExprCommand eCmd, void * pArg ) const
{
if ( eCmd==SPH_EXPR_COLLECT_CONST_ARGS )
((ExprConstArgs_c*)pArg)->m_bAnd = true;
}
void Expr_BinaryFilter_c::SetFlagOr ( ESphExprCommand eCmd, void * pArg ) const
{
if ( eCmd==SPH_EXPR_COLLECT_CONST_ARGS )
((ExprConstArgs_c*)pArg)->m_bOr = true;
}
////////////////////////////////////////////////////////////////////
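/// decides whether a filter applied to a boolean-ish aliased expression (one that
/// evaluates to 0 or 1) can be turned into a filter on the underlying attribute,
/// and whether its sense must be flipped. A sketch of the mapping: "alias=1" keeps
/// bExclude false, "alias=0" inverts it; a range covering both 0 and 1 (match all)
/// or neither (match nothing) cannot be expressed this way and is rejected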
bool CanAliasedExprSetupAsFilter ( const CSphFilterSettings & tFilter, bool & bExclude )
{
switch ( tFilter.m_eType )
{
case SPH_FILTER_VALUES:
// fixme! this means "return nothing"
if ( tFilter.m_dValues.GetLength()!=1 )
return false;
if ( tFilter.m_dValues[0]!=0 && tFilter.m_dValues[0]!=1 )
return false;
bExclude = ( tFilter.m_dValues[0]==0 ) ^ tFilter.m_bExclude;
return true;
case SPH_FILTER_RANGE:
{
bool bFilterIncludes0 = EvalRange ( 0, tFilter );
bool bFilterIncludes1 = EvalRange ( 1, tFilter );
// fixme! this means "return all"
if ( bFilterIncludes0 && bFilterIncludes1 )
return false;
// fixme! this means "return nothing"
if ( !bFilterIncludes0 && !bFilterIncludes1 )
return false;
bExclude = !bFilterIncludes1;
}
return true;
default:
return false;
}
}
////////////////////////////////////////////////////////////////////
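/// evaluates ALL()/ANY()/INDEXOF() over a JSON array: for each element it packs the
/// element type and blob offset into m_uData (the iterator slot that inner
/// Expr_Iterator_c nodes pick up via SPH_EXPR_SET_ITERATOR), then runs the inner
/// expression; m_bStrict selects ALL semantics (stop on the first miss) vs ANY
/// (stop on the first hit), and m_bIndex returns the element index instead of 0/1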
class Expr_ForIn_c : public Expr_JsonFieldConv_c
{
public:
Expr_ForIn_c ( ISphExpr * pArg, bool bStrict, bool bIndex )
: Expr_JsonFieldConv_c ( pArg )
, m_bStrict ( bStrict )
, m_bIndex ( bIndex )
{}
SphAttr_t * GetRef ()
{
return (SphAttr_t*)&m_uData;
}
void SetExpr ( ISphExpr * pExpr )
{
if ( pExpr==m_pExpr )
return;
SafeAddRef ( pExpr );
m_pExpr = pExpr;
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
Expr_JsonFieldConv_c::FixupLocator ( pOldSchema, pNewSchema );
if ( m_pExpr )
m_pExpr->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_JsonFieldConv_c::Command ( eCmd, pArg );
if ( m_pExpr )
m_pExpr->Command ( eCmd, pArg );
}
bool ExprEval ( int * pResult, const CSphMatch & tMatch, int iIndex, ESphJsonType eType, const BYTE * pVal ) const
{
m_uData = sphJsonPackTypeOffset ( eType, pVal-m_pBlobPool );
bool bMatch = m_pExpr->Eval ( tMatch )!=0;
*pResult = bMatch ? ( m_bIndex ? iIndex : 1 ) : ( m_bIndex ? -1 : 0 );
return m_bStrict==bMatch;
}
int IntEval ( const CSphMatch & tMatch ) const final
{
int iResult = m_bIndex ? -1 : 0;
if ( !m_pExpr )
return iResult;
const BYTE * p = nullptr;
ESphJsonType eJson = GetKey ( &p, tMatch );
switch ( eJson )
{
case JSON_INT32_VECTOR:
case JSON_INT64_VECTOR:
case JSON_DOUBLE_VECTOR:
{
int iSize = eJson==JSON_INT32_VECTOR ? 4 : 8;
ESphJsonType eType = eJson==JSON_INT32_VECTOR ? JSON_INT32
: eJson==JSON_INT64_VECTOR ? JSON_INT64
: JSON_DOUBLE;
int iLen = sphJsonUnpackInt ( &p );
for ( int i=0; i<iLen; i++, p+=iSize )
if ( !ExprEval ( &iResult, tMatch, i, eType, p ) )
break;
break;
}
case JSON_STRING_VECTOR:
{
sphJsonUnpackInt ( &p );
int iLen = sphJsonUnpackInt ( &p );
for ( int i=0;i<iLen;i++ )
{
if ( !ExprEval ( &iResult, tMatch, i, JSON_STRING, p ) )
break;
sphJsonSkipNode ( JSON_STRING, &p );
}
break;
}
case JSON_MIXED_VECTOR:
{
sphJsonUnpackInt ( &p );
int iLen = sphJsonUnpackInt ( &p );
for ( int i=0; i<iLen; ++i )
{
auto eType = (ESphJsonType)*p++;
if ( !ExprEval ( &iResult, tMatch, i, eType, p ) )
break;
sphJsonSkipNode ( eType, &p );
}
break;
}
default:
break;
}
return iResult;
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
bool SetupAsFilter ( CSphFilterSettings & tFilter, const ISphSchema & tSchema, const SIContainer_c & tSI ) const override
{
bool bExclude = false;
if ( !CanAliasedExprSetupAsFilter ( tFilter, bExclude ) )
return false;
if ( m_bStrict )
return false;
ExprConstArgs_c tConstArgs;
m_pExpr->Command ( SPH_EXPR_COLLECT_CONST_ARGS, (void*)&tConstArgs );
if ( tConstArgs.IsError() )
return false;
if ( tConstArgs.m_bAnd && tConstArgs.m_bOr )
return false;
std::pair<const ISphSchema*,CSphString> tSchemaWithName;
tSchemaWithName.first = &tSchema;
m_pArg->Command ( SPH_EXPR_FORMAT_AS_TEXT, (void*)&tSchemaWithName );
if ( tSchemaWithName.second.IsEmpty() )
return false;
if ( !tSI.IsEnabled ( tSchemaWithName.second ) )
return false;
bool bOk = false;
switch ( tConstArgs.m_eType )
{
case SPH_FILTER_VALUES:
case SPH_FILTER_STRING_LIST:
bOk = !tConstArgs.m_bAnd;
break;
case SPH_FILTER_RANGE:
case SPH_FILTER_FLOATRANGE:
bOk = !tConstArgs.m_bOr;
break;
default:
break;
}
if ( !bOk )
return false;
tFilter = tConstArgs;
tFilter.m_sAttrName = tSchemaWithName.second;
tFilter.m_eMvaFunc = SPH_MVAFUNC_ANY;
tFilter.m_bExclude = bExclude;
if ( tFilter.m_eType==SPH_FILTER_VALUES )
tFilter.m_dValues.Uniq();
return true;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_ForIn_c");
CALC_POD_HASH(m_bStrict);
CALC_POD_HASH(m_bIndex);
CALC_CHILD_HASH(m_pExpr);
return CALC_PARENT_HASH();
}
ISphExpr * Clone() const final { return new Expr_ForIn_c ( *this ); }
private:
CSphRefcountedPtr<ISphExpr> m_pExpr;
bool m_bStrict {false};
bool m_bIndex {false};
mutable uint64_t m_uData {0};
Expr_ForIn_c ( const Expr_ForIn_c& rhs )
: Expr_JsonFieldConv_c ( rhs )
, m_pExpr ( SafeClone ( rhs.m_pExpr ) )
, m_bStrict ( rhs.m_bStrict )
, m_bIndex ( rhs.m_bIndex )
{
if ( m_pExpr )
m_pExpr->Command ( SPH_EXPR_SET_ITERATOR, &m_uData );
}
};
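// a sketch of what SetupAsFilter() above achieves: for something like
// ANY ( x=7 FOR x IN j.tags ) aliased in the select list and filtered as alias=1,
// the constant 7 is collected from the inner comparison and the construct is
// rewritten into a plain SPH_FILTER_VALUES filter over j.tags with SPH_MVAFUNC_ANY,
// which a secondary index can then serve directly (the attribute and value names
// here are purely illustrative)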
class Expr_StrCmp_c : public Expr_BinaryFilter_c
{
public:
Expr_StrCmp_c ( ISphExpr * pLeft, ISphExpr * pRight, ESphCollation eCollation, bool bExclude, EStrCmpDir eStrCmpDir )
: Expr_BinaryFilter_c ( "Expr_StrEq_c", pLeft, pRight )
, m_bExclude ( bExclude )
, m_eStrCmpDir ( eStrCmpDir )
{
m_fnStrCmp = GetStringCmpFunc ( eCollation );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pLeft = nullptr;
const BYTE * pRight = nullptr;
int iLeft = m_pFirst->StringEval ( tMatch, &pLeft );
int iRight = m_pSecond->StringEval ( tMatch, &pRight );
int iCmp = m_fnStrCmp ( {pLeft, iLeft}, {pRight, iRight}, false );
FreeDataPtr ( *m_pFirst, pLeft );
FreeDataPtr ( *m_pSecond, pRight );
switch ( m_eStrCmpDir )
{
case EStrCmpDir::LT: return ( iCmp<0 ) ^ m_bExclude;
case EStrCmpDir::GT: return ( iCmp>0 ) ^ m_bExclude;
case EStrCmpDir::EQ: return ( !iCmp ) ^ m_bExclude;
default: return 0;
}
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_Binary_c::Command ( eCmd, pArg );
PopulateConstArgsStr ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_StrCmp_c");
CALC_POD_HASH(m_fnStrCmp);
CALC_POD_HASH(m_bExclude);
CALC_POD_HASH(m_eStrCmpDir);
CALC_CHILD_HASH(m_pFirst);
CALC_CHILD_HASH(m_pSecond);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_StrCmp_c ( *this );
}
private:
SphStringCmp_fn m_fnStrCmp {nullptr};
bool m_bExclude = false;
EStrCmpDir m_eStrCmpDir;
Expr_StrCmp_c ( const Expr_StrCmp_c & ) = default;
};
class Expr_JsonFieldIsNull_c : public Expr_JsonFieldConv_c
{
public:
Expr_JsonFieldIsNull_c ( ISphExpr * pArg, bool bEquals )
: Expr_JsonFieldConv_c ( pArg )
, m_bEquals ( bEquals )
{}
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
return m_bEquals ^ ( eJson!=JSON_EOF && eJson!=JSON_NULL );
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)IntEval ( tMatch ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonFieldIsNull_c");
CALC_POD_HASH(m_bEquals);
return CALC_PARENT_HASH();
}
ISphExpr* Clone() const final
{
return new Expr_JsonFieldIsNull_c ( *this );
}
private:
bool m_bEquals;
Expr_JsonFieldIsNull_c ( const Expr_JsonFieldIsNull_c& ) = default;
};
//////////////////////////////////////////////////////////////////////////
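// returns the weight of the current worst match in the priority queue (hooked up
// via EXTRA_GET_QUEUE_WORST), or the type's minimum while the queue is not attached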
class Expr_MinTopWeight_c : public Expr_NoLocator_c
{
public:
int IntEval ( const CSphMatch & ) const final { return m_pWeight ? *m_pWeight : -INT_MAX; }
float Eval ( const CSphMatch & ) const final { return m_pWeight ? (float)*m_pWeight : -FLT_MAX; }
int64_t Int64Eval ( const CSphMatch & ) const final { return m_pWeight ? *m_pWeight : -LLONG_MAX; }
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd!=SPH_EXPR_SET_EXTRA_DATA )
return;
CSphMatch * pWorst = nullptr;
if ( static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_QUEUE_WORST, (void**)&pWorst ) )
m_pWeight = &pWorst->m_iWeight;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true;
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_MinTopWeight_c;
}
private:
int * m_pWeight {nullptr};
};
class Expr_MinTopSortval_c : public Expr_NoLocator_c
{
public:
float Eval ( const CSphMatch & ) const final
{
if ( m_pWorst && m_pWorst->m_pDynamic && m_iSortval>=0 )
return *(float*)( m_pWorst->m_pDynamic + m_iSortval );
return -FLT_MAX;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd!=SPH_EXPR_SET_EXTRA_DATA )
return;
auto * p = (ISphExtra*)pArg;
if ( !p->ExtraData ( EXTRA_GET_QUEUE_WORST, (void**)&m_pWorst )
|| !p->ExtraData ( EXTRA_GET_QUEUE_SORTVAL, (void**)&m_iSortval ) )
{
m_pWorst = nullptr;
}
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true;
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_MinTopSortval_c;
}
private:
CSphMatch * m_pWorst {nullptr};
int m_iSortval {-1};
};
class Expr_Rand_c : public Expr_Unary_c
{
public:
Expr_Rand_c ( ISphExpr * pFirst, bool bConst )
: Expr_Unary_c ( "Expr_Rand_c", pFirst )
, m_bConst ( bConst )
{
sphAutoSrand ();
m_uState = ( (uint64_t)sphRand() << 32 ) + sphRand();
}
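// xorshift64* generator (shift triple 12/25/27 plus an odd multiplier); note that
// a zero state is a fixed point, so the state must always be seeded non-zero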
uint64_t XorShift64Star() const
{
m_uState ^= m_uState >> 12;
m_uState ^= m_uState << 25;
m_uState ^= m_uState >> 27;
return m_uState * 2685821657736338717ULL;
}
float Eval ( const CSphMatch & tMatch ) const final
{
if ( m_pFirst )
{
uint64_t uSeed = (uint64_t)m_pFirst->Int64Eval ( tMatch );
if ( !m_bConst )
m_uState = uSeed;
else if ( m_bFirstEval )
{
m_uState = uSeed;
m_bFirstEval = false;
}
}
return (float)( XorShift64Star() / (double)UINT64_MAX );
}
int IntEval ( const CSphMatch & tMatch ) const final { return (int)Eval ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)Eval ( tMatch ); }
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true;
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_Rand_c ( *this );
}
private:
bool m_bConst {false};
mutable bool m_bFirstEval {true};
mutable uint64_t m_uState {0};
private:
Expr_Rand_c ( const Expr_Rand_c& rhs )
: Expr_Unary_c ( rhs )
, m_bConst ( rhs.m_bConst )
{}
};
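// shorthands for evaluating child expressions inside the DECLARE_* class
// generators below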
#define EVALFIRST m_pFirst->Eval(tMatch)
#define EVALSECOND m_pSecond->Eval(tMatch)
#define EVALTHIRD m_pThird->Eval(tMatch)
#define INTFIRST m_pFirst->IntEval(tMatch)
#define INTSECOND m_pSecond->IntEval(tMatch)
#define INTTHIRD m_pThird->IntEval(tMatch)
#define INT64FIRST m_pFirst->Int64Eval(tMatch)
#define INT64SECOND m_pSecond->Int64Eval(tMatch)
#define INT64THIRD m_pThird->Int64Eval(tMatch)
#define DECLARE_UNARY_TRAITS(_classname) \
class _classname : public Expr_Unary_c \
{ \
public: \
explicit _classname ( ISphExpr * pFirst ) : Expr_Unary_c ( #_classname, pFirst ) {} \
_classname ( const _classname& rhs ) : Expr_Unary_c (rhs) {} \
ISphExpr* Clone() const final { return new _classname(*this); }
#define DECLARE_END() };
#define DECLARE_UNARY_FLT(_classname,_expr) \
DECLARE_UNARY_TRAITS ( _classname ) \
float Eval ( const CSphMatch & tMatch ) const final { return _expr; } \
};
#define DECLARE_UNARY_INT(_classname,_expr,_expr2,_expr3) \
DECLARE_UNARY_TRAITS ( _classname ) \
float Eval ( const CSphMatch & tMatch ) const final { return (float)_expr; } \
int IntEval ( const CSphMatch & tMatch ) const final { return _expr2; } \
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return _expr3; } \
};
#define IABS(_arg) ( (_arg)>0 ? (_arg) : (-(_arg)) )
DECLARE_UNARY_INT ( Expr_Neg_c, -EVALFIRST, -INTFIRST, -INT64FIRST )
DECLARE_UNARY_INT ( Expr_Abs_c, fabs(EVALFIRST), IABS(INTFIRST), IABS(INT64FIRST) )
DECLARE_UNARY_INT ( Expr_Ceil_c, float(ceil(EVALFIRST)), int(ceil(EVALFIRST)), int64_t(ceil(EVALFIRST)) )
DECLARE_UNARY_INT ( Expr_Floor_c, float(floor(EVALFIRST)), int(floor(EVALFIRST)), int64_t(floor(EVALFIRST)) )
DECLARE_UNARY_FLT ( Expr_Sin_c, float(sin(EVALFIRST)) )
DECLARE_UNARY_FLT ( Expr_Cos_c, float(cos(EVALFIRST)) )
DECLARE_UNARY_FLT ( Expr_Exp_c, float(exp(EVALFIRST)) )
DECLARE_UNARY_INT ( Expr_NotInt_c, (float)(INTFIRST?0:1), INTFIRST?0:1, INTFIRST?0:1 )
DECLARE_UNARY_INT ( Expr_NotInt64_c, (float)(INT64FIRST?0:1), INT64FIRST?0:1, INT64FIRST?0:1 )
DECLARE_UNARY_INT ( Expr_Sint_c, (float)(INTFIRST), INTFIRST, INTFIRST )
DECLARE_UNARY_TRAITS ( Expr_Ln_c )
float Eval ( const CSphMatch & tMatch ) const final
{
float fFirst = m_pFirst->Eval ( tMatch );
// ideally this would be SQLNULL instead of plain 0.0f
return fFirst>0.0f ? (float)log ( fFirst ) : 0.0f;
}
DECLARE_END()
DECLARE_UNARY_TRAITS ( Expr_Log2_c )
float Eval ( const CSphMatch & tMatch ) const final
{
float fFirst = m_pFirst->Eval ( tMatch );
// ideally this would be SQLNULL instead of plain 0.0f
return fFirst>0.0f ? (float)( log ( fFirst )*M_LOG2E ) : 0.0f;
}
DECLARE_END()
DECLARE_UNARY_TRAITS ( Expr_Log10_c )
float Eval ( const CSphMatch & tMatch ) const final
{
float fFirst = m_pFirst->Eval ( tMatch );
// ideally this would be SQLNULL instead of plain 0.0f
return fFirst>0.0f ? (float)( log ( fFirst )*M_LOG10E ) : 0.0f;
}
DECLARE_END()
DECLARE_UNARY_TRAITS ( Expr_Sqrt_c )
float Eval ( const CSphMatch & tMatch ) const final
{
float fFirst = m_pFirst->Eval ( tMatch );
// ideally this would be SQLNULL instead of plain 0.0f in case of negative argument
// MEGA optimization: do not call sqrt for 0.0f
return fFirst>0.0f ? (float)sqrt ( fFirst ) : 0.0f;
}
DECLARE_END()
//////////////////////////////////////////////////////////////////////////
#define DECLARE_BINARY_TRAITS(_classname,_parent) \
class _classname : public _parent \
{ \
public: \
_classname ( ISphExpr * pFirst, ISphExpr * pSecond ) : _parent ( #_classname, pFirst, pSecond ) {} \
_classname ( const _classname& rhs ) : _parent (rhs) {} \
ISphExpr* Clone() const final { return new _classname(*this); }
#define DECLARE_BINARY_FLT(_classname,_expr) \
DECLARE_BINARY_TRAITS ( _classname, Expr_Binary_c ) \
float Eval ( const CSphMatch & tMatch ) const final { return _expr; } \
};
#define DECLARE_BINARY_INT(_classname,_expr,_expr2,_expr3) \
DECLARE_BINARY_TRAITS ( _classname, Expr_Binary_c ) \
float Eval ( const CSphMatch & tMatch ) const final { return _expr; } \
int IntEval ( const CSphMatch & tMatch ) const final { return _expr2; } \
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return _expr3; } \
};
#define DECLARE_BINARY_INT_EXPR(_classname,_expr,_expr2,_expr3,_expr4) \
DECLARE_BINARY_TRAITS ( _classname, Expr_BinaryFilter_c ) \
float Eval ( const CSphMatch & tMatch ) const final { return _expr; } \
int IntEval ( const CSphMatch & tMatch ) const final { return _expr2; } \
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return _expr3; } \
void Command ( ESphExprCommand eCmd, void * pArg ) final \
{ \
Expr_Binary_c::Command ( eCmd, pArg ); \
_expr4 ( eCmd, pArg ); \
} \
};
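// expands into three sibling classes (<name>Float_c, <name>Int_c, <name>Int64_c),
// one per widest argument type, all sharing the same const-arg collection hook _expr4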
#define DECLARE_BINARY_POLY(_classname,_expr,_expr2,_expr3,_expr4) \
DECLARE_BINARY_INT_EXPR ( _classname##Float_c, _expr, (int)Eval(tMatch), (int64_t)Eval(tMatch ), _expr4 ) \
DECLARE_BINARY_INT_EXPR ( _classname##Int_c, (float)IntEval(tMatch), _expr2, (int64_t)IntEval(tMatch), _expr4 ) \
DECLARE_BINARY_INT_EXPR ( _classname##Int64_c, (float)Int64Eval(tMatch), (int)Int64Eval(tMatch), _expr3, _expr4 )
#define IFFLT(_expr) ( (_expr) ? 1.0f : 0.0f )
#define IFINT(_expr) ( (_expr) ? 1 : 0 )
DECLARE_BINARY_INT ( Expr_Add_c, EVALFIRST + EVALSECOND, (DWORD)INTFIRST + (DWORD)INTSECOND, (uint64_t)INT64FIRST + (uint64_t)INT64SECOND )
DECLARE_BINARY_INT ( Expr_Sub_c, EVALFIRST - EVALSECOND, (DWORD)INTFIRST - (DWORD)INTSECOND, (uint64_t)INT64FIRST - (uint64_t)INT64SECOND )
DECLARE_BINARY_INT ( Expr_Mul_c, EVALFIRST * EVALSECOND, (DWORD)INTFIRST * (DWORD)INTSECOND, (uint64_t)INT64FIRST * (uint64_t)INT64SECOND )
DECLARE_BINARY_INT ( Expr_BitAnd_c, (float)(int(EVALFIRST)&int(EVALSECOND)), INTFIRST & INTSECOND, INT64FIRST & INT64SECOND )
DECLARE_BINARY_INT ( Expr_BitOr_c, (float)(int(EVALFIRST)|int(EVALSECOND)), INTFIRST | INTSECOND, INT64FIRST | INT64SECOND )
DECLARE_BINARY_INT ( Expr_Mod_c, (float)(int(EVALFIRST)%int(EVALSECOND)), INTFIRST % INTSECOND, INT64FIRST % INT64SECOND )
DECLARE_BINARY_TRAITS ( Expr_Div_c, Expr_Binary_c )
float Eval ( const CSphMatch & tMatch ) const final
{
float fSecond = m_pSecond->Eval ( tMatch );
// ideally this would be SQLNULL instead of plain 0.0f
return fSecond!=0.0f ? m_pFirst->Eval ( tMatch )/fSecond : 0.0f;
}
DECLARE_END()
DECLARE_BINARY_TRAITS ( Expr_Idiv_c, Expr_Binary_c )
float Eval ( const CSphMatch & tMatch ) const final
{
auto iSecond = int(EVALSECOND);
// ideally this would be SQLNULL instead of plain 0.0f
return iSecond ? float(int(EVALFIRST)/iSecond) : 0.0f;
}
int IntEval ( const CSphMatch & tMatch ) const final
{
int iSecond = INTSECOND;
// ideally this would be SQLNULL instead of plain 0
return iSecond ? ( INTFIRST / iSecond ) : 0;
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
int64_t iSecond = INT64SECOND;
// ideally this would be SQLNULL instead of plain 0
return iSecond ? ( INT64FIRST / iSecond ) : 0;
}
DECLARE_END()
DECLARE_BINARY_POLY ( Expr_Lt, IFFLT ( EVALFIRST<EVALSECOND ), IFINT ( INTFIRST<INTSECOND ), IFINT ( INT64FIRST<INT64SECOND ), PopulateConstArgsLtInt )
DECLARE_BINARY_POLY ( Expr_Gt, IFFLT ( EVALFIRST>EVALSECOND ), IFINT ( INTFIRST>INTSECOND ), IFINT ( INT64FIRST>INT64SECOND ), PopulateConstArgsGtInt )
DECLARE_BINARY_POLY ( Expr_Lte, IFFLT ( EVALFIRST<=EVALSECOND ), IFINT ( INTFIRST<=INTSECOND ), IFINT ( INT64FIRST<=INT64SECOND ), PopulateConstArgsLteInt )
DECLARE_BINARY_POLY ( Expr_Gte, IFFLT ( EVALFIRST>=EVALSECOND ), IFINT ( INTFIRST>=INTSECOND ), IFINT ( INT64FIRST>=INT64SECOND ), PopulateConstArgsGteInt )
DECLARE_BINARY_POLY ( Expr_Eq, IFFLT ( fabs ( EVALFIRST-EVALSECOND )<=1e-6 ), IFINT ( INTFIRST==INTSECOND ), IFINT ( INT64FIRST==INT64SECOND ), PopulateConstArgsEqInt )
DECLARE_BINARY_POLY ( Expr_Ne, IFFLT ( fabs ( EVALFIRST-EVALSECOND )>1e-6 ), IFINT ( INTFIRST!=INTSECOND ), IFINT ( INT64FIRST!=INT64SECOND ), PopulateConstArgsNeInt )
DECLARE_BINARY_INT ( Expr_Min_c, Min ( EVALFIRST, EVALSECOND ), Min ( INTFIRST, INTSECOND ), Min ( INT64FIRST, INT64SECOND ) )
DECLARE_BINARY_INT ( Expr_Max_c, Max ( EVALFIRST, EVALSECOND ), Max ( INTFIRST, INTSECOND ), Max ( INT64FIRST, INT64SECOND ) )
DECLARE_BINARY_FLT ( Expr_Pow_c, float ( pow ( EVALFIRST, EVALSECOND ) ) )
DECLARE_BINARY_POLY ( Expr_And, EVALFIRST!=0.0f && EVALSECOND!=0.0f, IFINT ( INTFIRST && INTSECOND ), IFINT ( INT64FIRST && INT64SECOND ), SetFlagAnd )
DECLARE_BINARY_POLY ( Expr_Or, EVALFIRST!=0.0f || EVALSECOND!=0.0f, IFINT ( INTFIRST || INTSECOND ), IFINT ( INT64FIRST || INT64SECOND ), SetFlagOr )
DECLARE_BINARY_FLT ( Expr_Atan2_c, float ( atan2 ( EVALFIRST, EVALSECOND ) ) )
//////////////////////////////////////////////////////////////////////////
/// boring base stuff
class ExprThreeway_c : public ISphExpr
{
public:
ExprThreeway_c ( const char * szClassName, ISphExpr * pFirst, ISphExpr * pSecond, ISphExpr * pThird )
: m_pFirst ( pFirst )
, m_pSecond ( pSecond )
, m_pThird ( pThird )
, m_szExprName ( szClassName )
{
SafeAddRef ( pFirst );
SafeAddRef ( pSecond );
SafeAddRef ( pThird );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
m_pFirst->FixupLocator ( pOldSchema, pNewSchema );
m_pSecond->FixupLocator ( pOldSchema, pNewSchema );
m_pThird->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
m_pFirst->Command ( eCmd, pArg );
m_pSecond->Command ( eCmd, pArg );
m_pThird->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) override
{
EXPR_CLASS_NAME_NOCHECK( m_szExprName );
CALC_CHILD_HASH(m_pFirst);
CALC_CHILD_HASH(m_pSecond);
CALC_CHILD_HASH(m_pThird);
return CALC_DEP_HASHES();
}
protected:
CSphRefcountedPtr<ISphExpr> m_pFirst;
CSphRefcountedPtr<ISphExpr> m_pSecond;
CSphRefcountedPtr<ISphExpr> m_pThird;
const char* m_szExprName;
protected:
ExprThreeway_c ( const ExprThreeway_c & rhs )
: m_pFirst ( SafeClone (rhs.m_pFirst) )
, m_pSecond ( SafeClone (rhs.m_pSecond) )
, m_pThird ( SafeClone (rhs.m_pThird) )
, m_szExprName ( rhs.m_szExprName )
{}
};
#define DECLARE_TERNARY(_classname,_expr,_expr2,_expr3) \
class _classname : public ExprThreeway_c \
{ \
public: \
_classname ( ISphExpr * pFirst, ISphExpr * pSecond, ISphExpr * pThird ) \
: ExprThreeway_c ( #_classname, pFirst, pSecond, pThird ) {} \
\
float Eval ( const CSphMatch & tMatch ) const final { return _expr; } \
int IntEval ( const CSphMatch & tMatch ) const final { return _expr2; } \
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return _expr3; } \
_classname ( const _classname& rhs ) : ExprThreeway_c (rhs) {} \
ISphExpr* Clone() const final { return new _classname(*this); } \
};
DECLARE_TERNARY ( Expr_If_c, ( EVALFIRST!=0.0f ) ? EVALSECOND : EVALTHIRD, INTFIRST ? INTSECOND : INTTHIRD, INT64FIRST ? INT64SECOND : INT64THIRD )
DECLARE_TERNARY ( Expr_Madd_c, EVALFIRST*EVALSECOND+EVALTHIRD, INTFIRST*INTSECOND + INTTHIRD, INT64FIRST*INT64SECOND + INT64THIRD )
DECLARE_TERNARY ( Expr_Mul3_c, EVALFIRST*EVALSECOND*EVALTHIRD, INTFIRST*INTSECOND*INTTHIRD, INT64FIRST*INT64SECOND*INT64THIRD )
//////////////////////////////////////////////////////////////////////////
// UDF CALL SITE
//////////////////////////////////////////////////////////////////////////
static void * UdfMalloc ( int iLen )
{
return new BYTE [ iLen ];
}
/// UDF call site
struct UdfCall_t
{
PluginUDFRefPtr_c m_pUdf;
SPH_UDF_INIT m_tInit;
SPH_UDF_ARGS m_tArgs;
CSphVector<int> m_dArgs2Free; // these args should be freed explicitly
UdfCall_t()
{
m_tInit.func_data = nullptr;
m_tInit.is_const = false;
m_tArgs.arg_count = 0;
m_tArgs.arg_types = nullptr;
m_tArgs.arg_values = nullptr;
m_tArgs.arg_names = nullptr;
m_tArgs.str_lengths = nullptr;
m_tArgs.fn_malloc = UdfMalloc;
}
~UdfCall_t ()
{
SafeDeleteArray ( m_tArgs.arg_types );
SafeDeleteArray ( m_tArgs.arg_values );
SafeDeleteArray ( m_tArgs.arg_names );
SafeDeleteArray ( m_tArgs.str_lengths );
}
};
//////////////////////////////////////////////////////////////////////////
// PARSER INTERNALS
//////////////////////////////////////////////////////////////////////////
class ExprParser_t;
#include "bissphinxexpr.h"
/// known operations, columns and functions
enum Tokh_e : BYTE
{
// functions came first
FUNC_NOW = 0,
FUNC_ABS,
FUNC_CEIL,
FUNC_FLOOR,
FUNC_SIN,
FUNC_COS,
FUNC_LN,
FUNC_LOG2,
FUNC_LOG10,
FUNC_EXP,
FUNC_SQRT,
FUNC_BIGINT,
FUNC_SINT,
FUNC_CRC32,
FUNC_FIBONACCI,
FUNC_KNN_DIST,
FUNC_DAY,
FUNC_WEEK,
FUNC_MONTH,
FUNC_YEAR,
FUNC_YEARMONTH,
FUNC_YEARMONTHDAY,
FUNC_YEARWEEK,
FUNC_HOUR,
FUNC_MINUTE,
FUNC_SECOND,
FUNC_DAYOFWEEK,
FUNC_DAYOFYEAR,
FUNC_QUARTER,
FUNC_MIN,
FUNC_MAX,
FUNC_POW,
FUNC_IDIV,
FUNC_IF,
FUNC_MADD,
FUNC_MUL3,
FUNC_INTERVAL,
FUNC_IN,
FUNC_BITDOT,
FUNC_REMAP,
FUNC_GEODIST,
FUNC_EXIST,
FUNC_POLY2D,
FUNC_GEOPOLY2D,
FUNC_CONTAINS,
FUNC_ZONESPANLIST,
FUNC_CONCAT,
FUNC_TO_STRING,
FUNC_RANKFACTORS,
FUNC_FACTORS,
FUNC_BM25F,
FUNC_INTEGER,
FUNC_DOUBLE,
FUNC_LENGTH,
FUNC_LEAST,
FUNC_GREATEST,
FUNC_UINT,
FUNC_UINT64,
FUNC_QUERY,
FUNC_CURTIME,
FUNC_CURDATE,
FUNC_TIME,
FUNC_DATE,
FUNC_DAYNAME,
FUNC_MONTHNAME,
FUNC_UTC_TIME,
FUNC_UTC_TIMESTAMP,
FUNC_TIMEDIFF,
FUNC_DATEDIFF,
FUNC_DATEADD,
FUNC_DATESUB,
FUNC_CURRENT_USER,
FUNC_CONNECTION_ID,
FUNC_ALL,
FUNC_ANY,
FUNC_INDEXOF,
FUNC_MIN_TOP_WEIGHT,
FUNC_MIN_TOP_SORTVAL,
FUNC_ATAN2,
FUNC_RAND,
FUNC_REGEX,
FUNC_SUBSTRING_INDEX,
FUNC_UPPER,
FUNC_LOWER,
FUNC_LAST_INSERT_ID,
FUNC_LEVENSHTEIN,
FUNC_DATE_FORMAT,
FUNC_DATABASE,
FUNC_USER,
FUNC_VERSION,
FUNC_RANGE,
FUNC_HISTOGRAM,
FUNC_DATE_RANGE,
FUNC_DATE_HISTOGRAM,
FUNC_UUID_SHORT,
FUNC_FUNCS_COUNT, // insert any new functions ABOVE this one
TOKH_TOKH_OFFSET = FUNC_FUNCS_COUNT,
// general operations and operators
TOKH_COUNT = TOKH_TOKH_OFFSET, // a column named 'count' may exist; if so, it overrides this token
TOKH_WEIGHT,
TOKH_GROUPBY,
TOKH_DISTINCT,
TOKH_AND,
TOKH_OR,
TOKH_NOT,
TOKH_DIV,
TOKH_MOD,
TOKH_FOR,
TOKH_IS,
TOKH_NULL,
TOKH_TOKH_COUNT,
TOKH_UNKNOWN = TOKH_TOKH_COUNT
};
// g_dHash2Op[eTok-TOKH_TOKH_OFFSET] is the 1:1 mapping from a hashed keyword token to its parser token
const static int g_dHash2Op[TOKH_TOKH_COUNT-TOKH_TOKH_OFFSET] = { TOK_COUNT, TOK_WEIGHT, TOK_GROUPBY, TOK_DISTINCT, TOK_AND, TOK_OR, TOK_NOT, TOK_DIV, TOK_MOD, TOK_FOR, TOK_IS, TOK_NULL, };
struct TokhKeyVal_t
{
const char * m_sName = nullptr;
Tokh_e m_eTok = TOKH_UNKNOWN;
int m_iLen = 0;
TokhKeyVal_t ( const char * sName, Tokh_e eTok )
: m_sName ( sName )
, m_eTok ( eTok )
{
m_iLen = (int)strlen ( m_sName );
}
};
const static TokhKeyVal_t g_dKeyValTokens[] = // order is not essential, but the generated hash may depend on it a bit
{
// functions
{ "now", FUNC_NOW },
{ "abs", FUNC_ABS },
{ "ceil", FUNC_CEIL },
{ "floor", FUNC_FLOOR },
{ "sin", FUNC_SIN },
{ "cos", FUNC_COS },
{ "ln", FUNC_LN },
{ "log2", FUNC_LOG2 },
{ "log10", FUNC_LOG10 },
{ "exp", FUNC_EXP },
{ "sqrt", FUNC_SQRT },
{ "bigint", FUNC_BIGINT }, // type-enforcer special as-if-function
{ "sint", FUNC_SINT }, // type-enforcer special as-if-function
{ "crc32", FUNC_CRC32 },
{ "fibonacci", FUNC_FIBONACCI },
{ "knn_dist", FUNC_KNN_DIST },
{ "day", FUNC_DAY },
{ "week", FUNC_WEEK },
{ "month", FUNC_MONTH },
{ "year", FUNC_YEAR },
{ "yearmonth", FUNC_YEARMONTH },
{ "yearmonthday", FUNC_YEARMONTHDAY },
{ "yearweek", FUNC_YEARWEEK },
{ "hour", FUNC_HOUR },
{ "minute", FUNC_MINUTE },
{ "second", FUNC_SECOND },
{ "dayofweek", FUNC_DAYOFWEEK },
{ "dayofyear", FUNC_DAYOFYEAR },
{ "quarter", FUNC_QUARTER },
{ "min", FUNC_MIN },
{ "max", FUNC_MAX },
{ "pow", FUNC_POW },
{ "idiv", FUNC_IDIV },
{ "if", FUNC_IF },
{ "madd", FUNC_MADD },
{ "mul3", FUNC_MUL3 },
{ "interval", FUNC_INTERVAL },
{ "in", FUNC_IN },
{ "bitdot", FUNC_BITDOT },
{ "remap", FUNC_REMAP },
{ "geodist", FUNC_GEODIST },
{ "exist", FUNC_EXIST },
{ "poly2d", FUNC_POLY2D },
{ "geopoly2d", FUNC_GEOPOLY2D },
{ "contains", FUNC_CONTAINS },
{ "zonespanlist", FUNC_ZONESPANLIST },
{ "concat", FUNC_CONCAT },
{ "to_string", FUNC_TO_STRING },
{ "rankfactors", FUNC_RANKFACTORS },
{ "packedfactors", FUNC_FACTORS },
{ "factors", FUNC_FACTORS }, // just an alias for PACKEDFACTORS()
{ "bm25f", FUNC_BM25F },
{ "integer", FUNC_INTEGER },
{ "double", FUNC_DOUBLE },
{ "length", FUNC_LENGTH },
{ "least", FUNC_LEAST },
{ "greatest", FUNC_GREATEST },
{ "uint", FUNC_UINT },
{ "uint64", FUNC_UINT64 },
{ "query", FUNC_QUERY },
{ "curtime", FUNC_CURTIME },
{ "curdate", FUNC_CURDATE },
{ "time", FUNC_TIME },
{ "date", FUNC_DATE },
{ "dayname", FUNC_DAYNAME },
{ "monthname", FUNC_MONTHNAME },
{ "utc_time", FUNC_UTC_TIME },
{ "utc_timestamp", FUNC_UTC_TIMESTAMP },
{ "timediff", FUNC_TIMEDIFF },
{ "datediff", FUNC_DATEDIFF },
{ "date_add", FUNC_DATEADD },
{ "date_sub", FUNC_DATESUB },
{ "current_user", FUNC_CURRENT_USER },
{ "connection_id", FUNC_CONNECTION_ID },
{ "all", FUNC_ALL },
{ "any", FUNC_ANY },
{ "indexof", FUNC_INDEXOF },
{ "min_top_weight", FUNC_MIN_TOP_WEIGHT },
{ "min_top_sortval",FUNC_MIN_TOP_SORTVAL },
{ "atan2", FUNC_ATAN2 },
{ "rand", FUNC_RAND },
{ "regex", FUNC_REGEX },
{ "substring_index",FUNC_SUBSTRING_INDEX },
{ "upper", FUNC_UPPER },
{ "lower", FUNC_LOWER },
{ "last_insert_id", FUNC_LAST_INSERT_ID },
{ "levenshtein", FUNC_LEVENSHTEIN },
{ "date_format", FUNC_DATE_FORMAT },
{ "database", FUNC_DATABASE },
{ "user", FUNC_USER },
{ "version", FUNC_VERSION },
{ "range", FUNC_RANGE },
{ "histogram", FUNC_HISTOGRAM },
{ "date_range", FUNC_DATE_RANGE },
{ "date_histogram", FUNC_DATE_HISTOGRAM },
{ "uuid_short", FUNC_UUID_SHORT },
// other reserved (operators, columns, etc.)
{ "count", TOKH_COUNT },
{ "weight", TOKH_WEIGHT },
{ "groupby", TOKH_GROUPBY },
{ "distinct", TOKH_DISTINCT },
{ "and", TOKH_AND },
{ "or", TOKH_OR },
{ "not", TOKH_NOT },
{ "div", TOKH_DIV },
{ "mod", TOKH_MOD },
{ "for", TOKH_FOR },
{ "is", TOKH_IS },
{ "null", TOKH_NULL },
};
// helper to generate input data for gperf
// change #if 0 to #if 1, compile, and run any binary that includes this file (for example, searchd or gmanticoretests)
// grab the output and place it into a file '1.p', then execute the command printed at the end of 1.p
// copy dAsso from asso_values in the generated C source
// modify the iHash switch according to that C source, if needed
// compile and run the program and copy dIndexes from the output
// to ignore case: in the asso table copy values 97..122 (a..z) over 65..90 (A..Z), and change strcmp to strcasecmp
#if 0
int HashGen()
{
printf ( "struct func { char *name; int num; };\n%%%%\n" );
for ( int i=0; i<int( sizeof ( g_dKeyValTokens )/sizeof ( g_dKeyValTokens[0] )); ++i )
printf ( "%s, %d\n", g_dKeyValTokens[i].m_sName, i );
printf ( "%%%%\n" );
printf ( "void main()\n" );
printf ( "{\n" );
printf ( "\tint i;\n" );
printf ( "\tfor ( i=0; i<=MAX_HASH_VALUE; ++i )\n" );
printf ( "\t\tprintf ( \"%%d,%%s\", wordlist[i].name[0] ? wordlist[i].num : -1, (i%%10)==9 ? \"\\n\" : \" \" );\n" );
printf ( "}\n" );
printf ( "// gperf -Gt -tm5000 1.p > 1.c\n" );
exit(0);
// sphDie ( "INTERNAL: HashGen() finished. Grab result, then change #if 1 to #if 0 few lines above %s:%d", __FILE__, __LINE__ );
}
static int G_HASHGEN = HashGen();
#endif
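// perfect-hash lookup over g_dKeyValTokens, produced with gperf as described above:
// a few characters of the key (picked by its length) plus its last character are
// folded through dAsso, and the candidate slot is verified with strncasecmp;
// returns TOKH_UNKNOWN on a miss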
static Tokh_e TokHashLookup ( Str_t sKey )
{
assert ( sKey.first && sKey.second && sKey.first[0] );
const static BYTE dAsso[] =
{
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 10, 10,
27, 49, 9, 6, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 29, 64, 14, 5, 5,
31, 25, 64, 6, 167, 14, 41, 7, 7, 38,
16, 16, 20, 13, 6, 36, 75, 58, 35, 32,
15, 167, 167, 167, 167, 49, 167, 29, 64, 14,
5, 5, 31, 25, 64, 6, 167, 14, 41, 7,
7, 38, 16, 16, 20, 13, 6, 36, 75, 58,
35, 32, 15, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167
};
const static short dIndexes[] =
{
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, 37, -1, -1, -1, -1,
104, 106, 102, -1, 29, 63, 34, 62, -1, 70,
4, 93, -1, 87, 65, 41, 12, 94, 99, 33,
9, 80, 100, 5, 52, 45, 73, 46, 44, 10,
6, 61, 60, 88, 76, 69, 64, 68, 1, 57,
101, 24, 91, 95, 58, 48, 36, -1, 96, -1,
49, 50, 16, 56, 105, -1, 25, 39, 59, 85,
30, 40, 83, 77, 15, 89, 72, 38, 71, 18,
81, 8, 28, 43, 55, 17, 75, 79, 26, 92,
42, 97, 47, 22, 27, 19, 2, 11, -1, 13,
-1, -1, 66, -1, 74, -1, 53, -1, -1, 78,
-1, -1, 90, -1, 7, 21, 0, -1, 67, 84,
-1, -1, 3, 51, 107, 31, -1, -1, 98, 86,
82, -1, -1, 54, 23, -1, -1, -1, 14, -1,
35, -1, -1, -1, 20, -1, -1, -1, 103, -1,
-1, -1, -1, -1, -1, -1, 32
};
auto * s = (const BYTE*) sKey.first;
auto iLen = sKey.second;
auto iHash = iLen;
switch ( iHash )
{
default:
iHash += dAsso[(unsigned char)s[3]];
/*FALLTHROUGH*/
case 3:
iHash += dAsso[(unsigned char)s[2]];
/*FALLTHROUGH*/
case 2:
case 1:
iHash += dAsso[(unsigned char)s[0]];
break;
}
iHash += dAsso[(unsigned char)s[iLen-1]];
if ( iHash>=(int)(sizeof(dIndexes)/sizeof(dIndexes[0])) )
return TOKH_UNKNOWN;
auto iFunc = dIndexes[iHash];
if ( iFunc>=0 && strncasecmp ( g_dKeyValTokens[iFunc].m_sName, sKey.first, iLen )==0 && g_dKeyValTokens[iFunc].m_iLen==iLen )
return g_dKeyValTokens[iFunc].m_eTok;
return TOKH_UNKNOWN;
}
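// e.g. TokHashLookup ( { "geodist", 7 } ) yields FUNC_GEODIST, while any identifier
// absent from g_dKeyValTokens (a plain column name, say) falls through to TOKH_UNKNOWN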
static int TokHashCheck()
{
// //in asso table values 65..90 (A..Z) copy from 97..122 (a..z), and change strcmp to strcasecmp
// //values 66..91 (A..Z) copy from 98..123 (a..z)
//int iFrom = 65;
//int iTo = 90;
//int iBase = 97;
//for ( int i=0; i<=iTo-iFrom; i++ )
// g_dAsso[iFrom+i] = g_dAsso[iBase+i];
//for ( int i=0; i<sizeof(g_dAsso); i++ )
//{
// if ( (i%10)==0 )
// printf ( "\n");
// printf ( "%d, ", g_dAsso[i] );
//}
for ( const auto & kv : g_dKeyValTokens )
{
CSphString sKey ( kv.m_sName );
sKey.ToLower ();
Str_t sKeyStr { sKey.cstr (), sKey.Length () };
auto uHash = TokHashLookup ( sKeyStr );
if ( uHash!=kv.m_eTok )
sphDie ( "INTERNAL ERROR: lookup for %s failed, got %d, expected %d, rebuild token hash", sKey.cstr(), uHash, kv.m_eTok );
sKey.ToUpper ();
uHash = TokHashLookup ( sKeyStr );
if ( uHash!=kv.m_eTok )
sphDie ( "INTERNAL ERROR: lookup for %s failed, got %d, expected %d, rebuild token hash", sKey.cstr(), uHash, kv.m_eTok );
}
if ( TokHashLookup ( { "A", 1 } )!=TOKH_UNKNOWN )
sphDie ( "INTERNAL ERROR: lookup for A() succeeded, rebuild token hash" );
return 1;
}
static int VARIABLE_IS_NOT_USED G_FUNC_HASH_CHECK = TokHashCheck();
// additional functions traits
struct FuncDesc_t
{
//const char * m_sName;
int m_iArgs; // positive assume exact N, negative assume 'at least'
int m_iNodeType; // usually TOK_FUNC, but sometimes not
// Tokh_e m_eFunc;
ESphAttr m_eRet;
};
static FuncDesc_t g_dFuncs[FUNC_FUNCS_COUNT] = // Keep same order as in Tokh_e
{
{ /*"now", */ 0, TOK_FUNC, /*FUNC_NOW, */ SPH_ATTR_INTEGER },
{ /*"abs", */ 1, TOK_FUNC, /*FUNC_ABS, */ SPH_ATTR_NONE },
{ /*"ceil", */ 1, TOK_FUNC, /*FUNC_CEIL, */ SPH_ATTR_BIGINT },
{ /*"floor", */ 1, TOK_FUNC, /*FUNC_FLOOR, */ SPH_ATTR_BIGINT },
{ /*"sin", */ 1, TOK_FUNC, /*FUNC_SIN, */ SPH_ATTR_FLOAT },
{ /*"cos", */ 1, TOK_FUNC, /*FUNC_COS, */ SPH_ATTR_FLOAT },
{ /*"ln", */ 1, TOK_FUNC, /*FUNC_LN, */ SPH_ATTR_FLOAT },
{ /*"log2", */ 1, TOK_FUNC, /*FUNC_LOG2, */ SPH_ATTR_FLOAT },
{ /*"log10", */ 1, TOK_FUNC, /*FUNC_LOG10, */ SPH_ATTR_FLOAT },
{ /*"exp", */ 1, TOK_FUNC, /*FUNC_EXP, */ SPH_ATTR_FLOAT },
{ /*"sqrt", */ 1, TOK_FUNC, /*FUNC_SQRT, */ SPH_ATTR_FLOAT },
{ /*"bigint", */ 1, TOK_FUNC, /*FUNC_BIGINT, */ SPH_ATTR_BIGINT }, // type-enforcer special as-if-function
{ /*"sint", */ 1, TOK_FUNC, /*FUNC_SINT, */ SPH_ATTR_BIGINT }, // type-enforcer special as-if-function
{ /*"crc32", */ 1, TOK_FUNC, /*FUNC_CRC32, */ SPH_ATTR_INTEGER },
{ /*"fibonacci", */ 1, TOK_FUNC, /*FUNC_FIBONACCI, */ SPH_ATTR_INTEGER },
{ /*"knn_dist"", */ 0, TOK_FUNC, /*FUNC_KNN_DIST, */ SPH_ATTR_FLOAT },
{ /*"day", */ 1, TOK_FUNC_DAY, /*FUNC_DAY, */ SPH_ATTR_INTEGER },
{ /*"week", */ -1, TOK_FUNC_WEEK, /*FUNC_WEEK, */ SPH_ATTR_INTEGER },
{ /*"month", */ 1, TOK_FUNC_MONTH, /*FUNC_MONTH, */ SPH_ATTR_INTEGER },
{ /*"year", */ 1, TOK_FUNC_YEAR, /*FUNC_YEAR, */ SPH_ATTR_INTEGER },
{ /*"yearmonth", */ 1, TOK_FUNC, /*FUNC_YEARMONTH, */ SPH_ATTR_INTEGER },
{ /*"yearmonthday", */ 1, TOK_FUNC, /*FUNC_YEARMONTHDAY, */ SPH_ATTR_INTEGER },
{ /*"yearweek", */ 1, TOK_FUNC, /*FUNC_YEARWEEK, */ SPH_ATTR_INTEGER },
{ /*"hour", */ 1, TOK_FUNC_HOUR, /*FUNC_HOUR, */ SPH_ATTR_INTEGER },
{ /*"minute", */ 1, TOK_FUNC_MINUTE,/*FUNC_MINUTE, */ SPH_ATTR_INTEGER },
{ /*"second", */ 1, TOK_FUNC_SECOND,/*FUNC_SECOND, */ SPH_ATTR_INTEGER },
{ /*"dayofweek", */ 1, TOK_FUNC, /*FUNC_DAYOFWEEK, */ SPH_ATTR_INTEGER },
{ /*"dayofyear", */ 1, TOK_FUNC, /*FUNC_DAYOFYEAR, */ SPH_ATTR_INTEGER },
{ /*"quarter", */ 1, TOK_FUNC_QUARTER,/*FUNC_QUARTER, */ SPH_ATTR_INTEGER },
{ /*"min", */ 2, TOK_FUNC, /*FUNC_MIN, */ SPH_ATTR_NONE },
{ /*"max", */ 2, TOK_FUNC, /*FUNC_MAX, */ SPH_ATTR_NONE },
{ /*"pow", */ 2, TOK_FUNC, /*FUNC_POW, */ SPH_ATTR_FLOAT },
{ /*"idiv", */ 2, TOK_FUNC, /*FUNC_IDIV, */ SPH_ATTR_NONE },
{ /*"if", */ 3, TOK_FUNC, /*FUNC_IF, */ SPH_ATTR_NONE },
{ /*"madd", */ 3, TOK_FUNC, /*FUNC_MADD, */ SPH_ATTR_NONE },
{ /*"mul3", */ 3, TOK_FUNC, /*FUNC_MUL3, */ SPH_ATTR_NONE },
{ /*"interval", */ -2, TOK_FUNC_INTERVAL,/*FUNC_INTERVAL, */ SPH_ATTR_INTEGER },
{ /*"in", */ -1, TOK_FUNC_IN, /*FUNC_IN, */ SPH_ATTR_INTEGER },
{ /*"bitdot", */ -1, TOK_FUNC, /*FUNC_BITDOT, */ SPH_ATTR_NONE },
{ /*"remap", */ 4, TOK_FUNC_REMAP, /*FUNC_REMAP, */ SPH_ATTR_INTEGER },
{ /*"geodist", */ -4, TOK_FUNC, /*FUNC_GEODIST, */ SPH_ATTR_FLOAT },
{ /*"exist", */ 2, TOK_FUNC, /*FUNC_EXIST, */ SPH_ATTR_NONE },
{ /*"poly2d", */ -1, TOK_FUNC, /*FUNC_POLY2D, */ SPH_ATTR_POLY2D },
{ /*"geopoly2d", */ -1, TOK_FUNC, /*FUNC_GEOPOLY2D, */ SPH_ATTR_POLY2D },
{ /*"contains", */ 3, TOK_FUNC, /*FUNC_CONTAINS, */ SPH_ATTR_INTEGER },
{ /*"zonespanlist", */ 0, TOK_FUNC, /*FUNC_ZONESPANLIST, */ SPH_ATTR_STRINGPTR },
{ /*"concat", */ -1, TOK_FUNC, /*FUNC_CONCAT, */ SPH_ATTR_STRINGPTR },
{ /*"to_string", */ 1, TOK_FUNC, /*FUNC_TO_STRING, */ SPH_ATTR_STRINGPTR },
{ /*"rankfactors", */ 0, TOK_FUNC, /*FUNC_RANKFACTORS, */ SPH_ATTR_STRINGPTR },
{ /*"packedfactors",*/ 0, TOK_FUNC_PF, /*FUNC_FACTORS, */ SPH_ATTR_FACTORS }, // and also 'factors'
{ /*"bm25f", */ -2, TOK_FUNC, /*FUNC_BM25F, */ SPH_ATTR_FLOAT },
{ /*"integer", */ 1, TOK_FUNC, /*FUNC_INTEGER, */ SPH_ATTR_BIGINT },
{ /*"double", */ 1, TOK_FUNC, /*FUNC_DOUBLE, */ SPH_ATTR_FLOAT },
{ /*"length", */ 1, TOK_FUNC, /*FUNC_LENGTH, */ SPH_ATTR_INTEGER },
{ /*"least", */ 1, TOK_FUNC, /*FUNC_LEAST, */ SPH_ATTR_STRINGPTR },
{ /*"greatest", */ 1, TOK_FUNC, /*FUNC_GREATEST, */ SPH_ATTR_STRINGPTR },
{ /*"uint", */ 1, TOK_FUNC, /*FUNC_UINT, */ SPH_ATTR_INTEGER },
{ /*"uint64", */ 1, TOK_FUNC, /*FUNC_UINT64, */ SPH_ATTR_UINT64 },
{ /*"query", */ 0, TOK_FUNC, /*FUNC_QUERY, */ SPH_ATTR_STRINGPTR },
{ /*"curtime", */ 0, TOK_FUNC, /*FUNC_CURTIME, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"curdate", */ 0, TOK_FUNC, /*FUNC_CURDATE, */ SPH_ATTR_STRINGPTR },
{ /*"time", */ 1, TOK_FUNC, /*FUNC_TIME, */ SPH_ATTR_STRINGPTR },
{ /*"date", */ 1, TOK_FUNC, /*FUNC_DATE, */ SPH_ATTR_STRINGPTR },
{ /*"dayname", */ 1, TOK_FUNC, /*FUNC_DAYNAME, */ SPH_ATTR_STRINGPTR },
{ /*"monthname", */ 1, TOK_FUNC, /*FUNC_MONTHNAME, */ SPH_ATTR_STRINGPTR },
{ /*"utc_time", */ 0, TOK_FUNC, /*FUNC_UTC_TIME, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"utc_timestamp",*/ 0, TOK_FUNC, /*FUNC_UTC_TIMESTAMP, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"timediff", */ 2, TOK_FUNC, /*FUNC_TIMEDIFF, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"datediff", */ 2, TOK_FUNC, /*FUNC_DATEDIFF, */ SPH_ATTR_BIGINT },
{ /*"date_add", */ -1, TOK_FUNC_DATE, /*FUNC_DATEADD, */ SPH_ATTR_BIGINT },
{ /*"date_sub", */ -1, TOK_FUNC_DATE, /*FUNC_DATESUB, */ SPH_ATTR_BIGINT },
{ /*"current_user", */ 0, TOK_FUNC, /*FUNC_CURRENT_USER, */ SPH_ATTR_STRINGPTR },
{ /*"connection_id",*/ 0, TOK_FUNC, /*FUNC_CONNECTION_ID, */ SPH_ATTR_INTEGER },
{ /*"all", */ -1, TOK_FUNC_JA, /*FUNC_ALL, */ SPH_ATTR_INTEGER },
{ /*"any", */ -1, TOK_FUNC_JA, /*FUNC_ANY, */ SPH_ATTR_INTEGER },
{ /*"indexof", */ -1, TOK_FUNC_JA, /*FUNC_INDEXOF, */ SPH_ATTR_BIGINT },
{ /*"min_top_weight",*/ 0, TOK_FUNC, /*FUNC_MIN_TOP_WEIGHT, */ SPH_ATTR_INTEGER },
{ /*"min_top_sortval",*/ 0, TOK_FUNC, /*FUNC_MIN_TOP_SORTVAL, */ SPH_ATTR_FLOAT },
{ /*"atan2", */ 2, TOK_FUNC, /*FUNC_ATAN2, */ SPH_ATTR_FLOAT },
{ /*"rand", */ -1, TOK_FUNC_RAND, /*FUNC_RAND, */ SPH_ATTR_FLOAT },
{ /*"regex", */ 2, TOK_FUNC, /*FUNC_REGEX, */ SPH_ATTR_INTEGER },
{ /*"substring_index",*/ 3, TOK_FUNC, /*FUNC_SUBSTRING_INDEX, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"upper", */ 1, TOK_FUNC, /*FUNC_UPPER, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"lower", */ 1, TOK_FUNC, /*FUNC_LOWER, */ SPH_ATTR_STRINGPTR }, // also evals numerics
{ /*"last_insert_id",*/ 0, TOK_FUNC, /*FUNC_LAST_INSERT_ID, */ SPH_ATTR_STRINGPTR },
{ /*"levenshtein", */ -1, TOK_FUNC, /*FUNC_LEVENSHTEIN, */ SPH_ATTR_NONE },
{ /*"date_format", */ 2, TOK_FUNC, /*FUNC_DATE_FORMAT, */ SPH_ATTR_STRINGPTR },
{ /*"database", */ 0, TOK_FUNC, /*FUNC_DATABASE, */ SPH_ATTR_STRINGPTR },
{ /*"user", */ 0, TOK_FUNC, /*FUNC_USER, */ SPH_ATTR_STRINGPTR },
{ /*"version", */ 0, TOK_FUNC, /*FUNC_VERSION, */ SPH_ATTR_STRINGPTR },
{ /*"range", */ -2, TOK_FUNC, /*FUNC_RANGE, */ SPH_ATTR_INTEGER },
{ /*"histogram",*/ 2, TOK_FUNC, /*FUNC_HISTOGRAM, */ SPH_ATTR_INTEGER },
{ /*"date_range", */ -2, TOK_FUNC, /*FUNC_DATE_RANGE, */ SPH_ATTR_INTEGER },
{ /*"date_histogram",*/ 2, TOK_FUNC, /*FUNC_DATE_HISTOGRAM, */ SPH_ATTR_INTEGER },
{ /*"uuid_short",*/ 0, TOK_FUNC, /*FUNC_UUID_SHORT, */ SPH_ATTR_BIGINT },
};
static inline const char* FuncNameByHash ( int iFunc )
{
if ( iFunc<0 || iFunc>=FUNC_FUNCS_COUNT )
return "unknown";
static const char * dNames[FUNC_FUNCS_COUNT] =
{ "now", "abs", "ceil", "floor", "sin", "cos", "ln", "log2", "log10", "exp", "sqrt", "bigint"
, "sint", "crc32", "fibonacci", "knn_dist", "day", "week", "month", "year", "yearmonth"
, "yearmonthday", "yearweek", "hour", "minute", "second", "dayofweek", "dayofyear", "quarter"
, "min", "max", "pow", "idiv", "if", "madd", "mul3", "interval", "in", "bitdot", "remap"
, "geodist", "exist", "poly2d", "geopoly2d", "contains", "zonespanlist", "concat", "to_string"
, "rankfactors", "packedfactors", "bm25f", "integer", "double", "length", "least", "greatest"
, "uint", "uint64", "query", "curtime", "curdata", "time", "date", "dayname", "monthname"
, "utc_time", "utc_timestamp", "timediff", "datediff", "date_add", "date_sub", "current_user"
, "connection_id", "all", "any", "indexof", "min_top_weight", "min_top_sortval", "atan2", "rand"
, "regex", "substring_index", "upper", "lower", "last_insert_id", "levenshtein", "date_format"
, "database", "user", "version", "range", "histogram", "date_range", "date_histogram", "uuid_short" };
return dNames[iFunc];
}
// set of functions which evals to SPH_ATTR_STRINGPTR, but also can eval to numerics
static inline bool CanEvalNumbers ( int iFunc )
{
switch (iFunc)
{
case FUNC_CURTIME:
case FUNC_UTC_TIME:
case FUNC_UTC_TIMESTAMP:
case FUNC_TIMEDIFF:
case FUNC_SUBSTRING_INDEX:
case FUNC_UPPER:
case FUNC_LOWER:
case FUNC_DATE_FORMAT:
return true;
default: return false;
}
}
//////////////////////////////////////////////////////////////////////////
static ISphExpr * ConvertExprJson ( ISphExpr * pExpr );
static void ConvertArgsJson ( VecRefPtrs_t<ISphExpr*> & dArgs );
/// check whether the type is int or bigint
static inline bool IsInt ( ESphAttr eType )
{
return eType==SPH_ATTR_INTEGER || eType==SPH_ATTR_BIGINT;
}
/// check whether the type can be promoted to integer
static inline bool IsNumericLike ( ESphAttr eType )
{
switch ( eType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
case SPH_ATTR_BIGINT:
case SPH_ATTR_TOKENCOUNT:
case SPH_ATTR_UINT64: return true;
default: return false;
}
}
/// check whether the type can be promoted to float
static inline bool IsFloatLike ( ESphAttr eType )
{
switch ( eType )
{
case SPH_ATTR_FLOAT:
case SPH_ATTR_DOUBLE: return true;
default: return false;
}
}
static inline bool IsJson ( ESphAttr eAttr )
{
return ( eAttr==SPH_ATTR_JSON_FIELD );
}
/// {title=2, body=1}
/// {in=deg, out=mi}
/// argument to functions like BM25F() and GEODIST()
class MapArg_c
{
public:
CSphVector<CSphNamedVariant> m_dPairs;
public:
void Add ( CSphString sKey, const char * sValue, int64_t iValue, float fValue, VariantType_e eType )
{
CSphNamedVariant & t = m_dPairs.Add();
t.m_sKey = std::move(sKey);
t.m_eType = eType;
switch ( eType )
{
case VariantType_e::BIGINT: t.m_iValue = iValue; break;
case VariantType_e::FLOAT: t.m_fValue = fValue; break;
case VariantType_e::STRING:
default: t.m_sValue = sValue;
}
}
};
/// expression tree node
/// used to build an AST (Abstract Syntax Tree)
struct ExprNode_t
{
int m_iToken = 0; ///< token type, including operators
ESphAttr m_eRetType { SPH_ATTR_NONE }; ///< result type
ESphAttr m_eArgType { SPH_ATTR_NONE }; ///< args type
CSphAttrLocator m_tLocator; ///< attribute locator, for TOK_ATTR type
int m_iLocator = -1; ///< index of attribute locator in schema
union
{
int64_t m_iConst; ///< constant value, for TOK_CONST_INT type
float m_fConst; ///< constant value, for TOK_CONST_FLOAT type
int m_iFunc; ///< built-in function id, for TOK_FUNC type
int m_iArgs; ///< args count, for arglist (token==',') type
ConstList_c * m_pConsts; ///< constants list, for TOK_CONST_LIST type
MapArg_c * m_pMapArg; ///< map argument (maps name to const or name to expr), for TOK_MAP_ARG type
const char * m_sIdent; ///< pointer to const char, for TOK_IDENT type
SphAttr_t * m_pAttr; ///< pointer to 64-bit value, for TOK_ITERATOR type
INIT_WITH_0 ( int64_t, float, int, ConstList_c*, MapArg_c*, const char*, SphAttr_t* );
};
int m_iLeft = -1;
int m_iRight = -1;
};
struct StackNode_t
{
int m_iNode;
int m_iLeft;
int m_iRight;
};
/// expression parser
class ExprParser_t
{
friend int yy1lex ( YYSTYPE * lvalp, void * yyscanner, ExprParser_t * pParser );
friend int yy1lex ( YYSTYPE *, ExprParser_t * );
friend int yylex ( YYSTYPE * lvalp, ExprParser_t * pParser );
friend int yyparse ( ExprParser_t * pParser );
friend void yyerror ( ExprParser_t * pParser, const char * sMessage );
public:
ExprParser_t ( ISphExprHook * pHook, QueryProfile_c * pProfiler, ESphCollation eCollation )
: m_pHook ( pHook )
, m_pProfiler ( pProfiler )
, m_eCollation ( eCollation )
{
m_dGatherStack.Reserve ( 64 );
}
~ExprParser_t ();
ISphExpr * Parse ( const char * szExpr, const ISphSchema & tSchema, const CSphString * pJoinIdx, ESphAttr * pAttrType, bool * pUsesWeight, CSphString & sError );
protected:
int m_iParsed = 0; ///< filled by yyparse() at the very end
CSphString m_sLexerError;
CSphString m_sParserError;
CSphString m_sCreateError;
ISphExprHook * m_pHook;
QueryProfile_c * m_pProfiler;
protected:
ESphAttr GetWidestRet ( int iLeft, int iRight );
int AddNodeInt ( int64_t iValue );
int AddNodeFloat ( float fValue );
int AddNodeString ( int64_t iValue );
int AddNodeAttr ( int iTokenType, uint64_t uAttrLocator );
int AddNodeField ( int iTokenType, uint64_t uAttrLocator );
int AddNodeColumnar ( int iTokenType, uint64_t uAttrLocator );
int AddNodeWeight();
int AddNodeOp ( int iOp, int iLeft, int iRight );
int AddNodeFunc0 ( int iFunc );
int AddNodeFunc ( int iFunc, int iArg );
int AddNodeFor ( int iFunc, int iExpr, int iLoop );
int AddNodeDate ( int iFunc, int iExpr1, int iExpr2, int iUnit );
int AddNodeIn ( int iArg, int iList );
int AddNodeRemap ( int iExpr1, int iExpr2, int iList1, int iList2 );
int AddNodeRand ( int iArg );
int AddNodeUdf ( int iCall, int iArg );
int AddNodePF ( int iFunc, int iArg );
int AddNodeConstlist ( int64_t iValue, bool bPackedString );
int AddNodeConstlist ( float iValue );
void AppendToConstlist ( int iNode, int64_t iValue );
void AppendToConstlist ( int iNode, float iValue );
int AddNodeUservar ( int iUservar );
int AddNodeHookIdent ( int iID );
int AddNodeHookFunc ( int iID, int iLeft );
int AddNodeHookFunc ( int iID );
int AddNodeMapArg ( const char * szKey, const char * szValue, int64_t iValue, float fValue, VariantType_e eType );
void AppendToMapArg ( int iNode, const char * szKey, const char * szValue, int64_t iValue, float fValue, VariantType_e eType );
const char * Attr2Ident ( uint64_t uAttrLoc );
const char * Field2Ident ( uint64_t uAttrLoc );
int AddNodeJsonField ( uint64_t uAttrLocator, int iLeft );
int AddNodeJsonSubkey ( int64_t iValue );
int AddNodeDotNumber ( int64_t iValue );
int AddNodeIdent ( const char * sKey, int iLeft );
int AddNodeWithTable ( const char * szTable, uint64_t uOffset );
uint64_t ParseAttrWithTable ( const char * szTable, uint64_t uOffset );
private:
void * m_pScanner = nullptr;
Str_t m_sExpr;
const ISphSchema * m_pSchema = nullptr;
const CSphString * m_pJoinIdx = nullptr;
CSphVector<ExprNode_t> m_dNodes;
StrVec_t m_dUservars;
CSphVector<char*> m_dIdents;
int m_iConstNow = 0;
CSphVector<StackNode_t> m_dGatherStack;
CSphVector<UdfCall_t*> m_dUdfCalls;
public:
bool m_bHasZonespanlist = false;
DWORD m_uPackedFactorFlags { SPH_FACTOR_DISABLE };
ESphEvalStage m_eEvalStage { SPH_EVAL_FINAL };
ESphCollation m_eCollation;
DWORD m_uStoredField = CSphColumnInfo::FIELD_NONE;
bool m_bNeedDocIds = false;
private:
bool CheckGeodist ( YYSTYPE * lvalp );
void AddUservar ( const char * sBegin, int iLen, YYSTYPE * lvalp );
int ProcessRawToken ( const char * sBegin, int iLen, YYSTYPE * lvalp );
int ProcessAtRawToken ( const char * sBegin, int iLen, YYSTYPE * lvalp );
int ErrLex ( const char * sTemplate, ...); // issue lexer error
int CheckForFields ( Tokh_e eTok, YYSTYPE * lvalp );
CSphVector<int> GatherArgTypes ( int iNode );
CSphVector<int> GatherArgNodes ( int iNode );
void GatherArgRetTypes ( int iNode, CSphVector<ESphAttr> & dTypes );
template < typename FN >
void GatherArgFN ( int iNode, FN && fnFunctor );
bool CheckForConstSet ( int iArgsNode, int iSkip );
int ParseAttr ( int iAttr, const char* sTok, YYSTYPE * lvalp );
static int ParseField ( int iField, const char* sTok, YYSTYPE * lvalp );
int ParseAttrsAndFields ( const char * szTok, YYSTYPE * lvalp );
int ParseJoinAttr ( const char * szTable, uint64_t uOffset );
template < typename T >
void WalkTree ( int iRoot, T & FUNCTOR );
void Optimize ( int iNode );
void CanonizePass ( int iNode );
void ConstantFoldPass ( int iNode );
void VariousOptimizationsPass ( int iNode );
void MultiNEPass ( int iNode );
bool MultiNEMatch ( const ExprNode_t * pLeft, const ExprNode_t * pRight, ExprNode_t & tRes, CSphVector<int64_t> & dValues );
bool TransformNENE ( ExprNode_t * pRoot, ExprNode_t * pLeft, ExprNode_t * pRight );
bool TransformInNE ( ExprNode_t * pRoot, ExprNode_t * pLeft, ExprNode_t * pRight );
void Dump ( int iNode );
ISphExpr * CreateTree ( int iNode );
ISphExpr * CreateIntervalNode ( int iArgsNode, CSphVector<ISphExpr *> & dArgs );
ISphExpr * CreateInNode ( int iNode );
ISphExpr * CreateLengthNode ( const ExprNode_t & tNode, ISphExpr * pLeft );
ISphExpr * CreateGeodistNode ( int iArgs );
ISphExpr * CreatePFNode ( int iArg );
ISphExpr * CreateBitdotNode ( int iArgsNode, CSphVector<ISphExpr *> & dArgs );
ISphExpr * CreateUdfNode ( int iCall, ISphExpr * pLeft );
ISphExpr * CreateExistNode ( const ExprNode_t & tNode );
ISphExpr * CreateContainsNode ( const ExprNode_t & tNode );
ISphExpr * CreateAggregateNode ( const ExprNode_t & tNode, ESphAggrFunc eFunc, ISphExpr * pLeft );
ISphExpr * CreateForInNode ( int iNode );
ISphExpr * CreateExprDateAdd ( int iNode, bool bAdd );
ISphExpr * CreateRegexNode ( ISphExpr * pAttr, ISphExpr * pString );
ISphExpr * CreateConcatNode ( int iArgsNode, CSphVector<ISphExpr *> & dArgs );
ISphExpr * CreateFieldNode ( int iField );
ISphExpr * CreateColumnarIntNode ( int iAttr, ESphAttr eAttrType );
ISphExpr * CreateColumnarFloatNode ( int iAttr );
ISphExpr * CreateColumnarStringNode ( int iAttr );
ISphExpr * CreateColumnarMvaNode ( int iAttr );
void FixupIterators ( int iNode, const char * sKey, SphAttr_t * pAttr );
ISphExpr * CreateLevenshteinNode ( ISphExpr * pPattern, ISphExpr * pAttr, ISphExpr * pOpts );
ISphExpr * CreateCmp ( const ExprNode_t & tNode, ISphExpr * pLeft, ISphExpr * pRight );
bool CheckStoredArg ( ISphExpr * pExpr );
bool PrepareFuncArgs ( const ExprNode_t & tNode, bool bSkipChildren, CSphRefcountedPtr<ISphExpr> & pLeft, CSphRefcountedPtr<ISphExpr> & pRight, VecRefPtrs_t<ISphExpr*> & dArgs );
ISphExpr * CreateFuncExpr ( int iNode, VecRefPtrs_t<ISphExpr*> & dArgs );
CSphString GetNameByLocator ( int iNode ) const;
CSphString GetNameByLocator ( const ExprNode_t & tNode ) const;
bool GetError () const { return !( m_sLexerError.IsEmpty() && m_sParserError.IsEmpty() && m_sCreateError.IsEmpty() ); }
bool GetCreateError () const { return !m_sCreateError.IsEmpty(); }
ISphExpr * Create ( bool * pUsesWeight, CSphString & sError );
};
//////////////////////////////////////////////////////////////////////////
// used to store in 8 bytes in Bison lvalp variable
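// bit layout: bit 63 = dynamic flag, bit 62 = blob flag, bits 32..46 = locator index;
// for blobs the low dword packs blob attr id (bits 0..14), blob attr count (15..29)
// and row offset (bit 30); for row attrs it packs bit count (0..15) and bit offset (16..31)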
static uint64_t sphPackAttrLocator ( const CSphAttrLocator & tLoc, int iLocator )
{
assert ( iLocator>=0 && iLocator<=0x7fff );
uint64_t uIndex = 0;
bool bBlob = tLoc.m_iBlobAttrId>=0;
if ( bBlob )
{
assert ( tLoc.m_nBlobAttrs>=0 && tLoc.m_nBlobAttrs<=0x7fff );
uIndex = tLoc.m_iBlobAttrId + ( tLoc.m_nBlobAttrs<<15 ) + ( tLoc.m_iBlobRowOffset<<30 ) + ( (uint64_t)iLocator<<32 );
}
else
uIndex = tLoc.m_iBitCount + ( (uint64_t)tLoc.m_iBitOffset<<16 ) + ( (uint64_t)iLocator<<32 ); // widen before shifting to avoid sign extension on large bit offsets
if ( tLoc.m_bDynamic )
uIndex |= ( U64C(1)<<63 );
if ( bBlob )
uIndex |= ( U64C(1)<<62 );
return uIndex;
}
static void sphUnpackAttrLocator ( uint64_t uIndex, ExprNode_t * pNode )
{
assert ( pNode );
bool bBlob = ( uIndex & ( U64C(1)<<62 ) )!=0;
if ( bBlob )
{
pNode->m_tLocator.m_iBlobAttrId = (int)( uIndex & 0x7fff );
pNode->m_tLocator.m_nBlobAttrs = (int)( ( uIndex>>15 ) & 0x7fff );
pNode->m_tLocator.m_iBlobRowOffset = (int)( ( uIndex>>30 ) & 1 );
pNode->m_tLocator.m_iBitCount = -1;
pNode->m_tLocator.m_iBitOffset = -1;
}
else
{
pNode->m_tLocator.m_iBitCount = (int)( uIndex & 0xffff );
pNode->m_tLocator.m_iBitOffset = (int)( ( uIndex>>16 ) & 0xffff );
pNode->m_tLocator.m_iBlobAttrId = -1;
pNode->m_tLocator.m_nBlobAttrs = 0;
}
pNode->m_tLocator.m_bDynamic = ( ( uIndex & ( U64C(1)<<63 ) )!=0 );
pNode->m_iLocator = (int)( ( uIndex>>32 ) & 0x7fff );
}
static int GetConstStrOffset ( const ExprNode_t & tNode )
{
return GetConstStrOffset ( tNode.m_iConst );
}
static int GetConstStrLength ( const ExprNode_t & tNode )
{
return GetConstStrLength ( tNode.m_iConst );
}
/// format error
int ExprParser_t::ErrLex ( const char * sTemplate, ... )
{
va_list ap;
va_start ( ap, sTemplate );
m_sLexerError.SetSprintfVa ( sTemplate, ap );
va_end ( ap );
return -1;
}
static int ConvertToColumnarType ( ESphAttr eAttr )
{
switch ( eAttr )
{
case SPH_ATTR_INTEGER: return TOK_COLUMNAR_INT;
case SPH_ATTR_TIMESTAMP: return TOK_COLUMNAR_TIMESTAMP;
case SPH_ATTR_FLOAT: return TOK_COLUMNAR_FLOAT;
case SPH_ATTR_BIGINT: return TOK_COLUMNAR_BIGINT;
case SPH_ATTR_BOOL: return TOK_COLUMNAR_BOOL;
case SPH_ATTR_STRING: return TOK_COLUMNAR_STRING;
case SPH_ATTR_UINT32SET: return TOK_COLUMNAR_UINT32SET;
case SPH_ATTR_INT64SET: return TOK_COLUMNAR_INT64SET;
case SPH_ATTR_FLOAT_VECTOR: return TOK_COLUMNAR_FLOATVEC;
default:
assert ( 0 && "Unknown columnar type" );
return -1;
}
}
int ExprParser_t::ParseAttr ( int iAttr, const char* sTok, YYSTYPE * lvalp )
{
// check attribute type and width
const CSphColumnInfo & tCol = m_pSchema->GetAttr ( iAttr );
// check for a duplicate attribute created for showing a stored field in the result set
if ( tCol.m_uFieldFlags & CSphColumnInfo::FIELD_STORED )
{
const CSphVector<CSphColumnInfo> & dFields = m_pSchema->GetFields();
ARRAY_FOREACH ( i, dFields )
if ( dFields[i].m_sName==tCol.m_sName )
{
lvalp->iAttrLocator = i;
return TOK_FIELD;
}
}
if ( tCol.IsColumnar() )
{
lvalp->iAttrLocator = iAttr;
return ConvertToColumnarType ( tCol.m_eAttrType );
}
int iRes = -1;
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_FLOAT: iRes = TOK_ATTR_FLOAT; break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_UINT32SET_PTR: iRes = TOK_ATTR_MVA32; break;
case SPH_ATTR_INT64SET:
case SPH_ATTR_INT64SET_PTR: iRes = TOK_ATTR_MVA64; break;
case SPH_ATTR_STRING:
case SPH_ATTR_STRINGPTR: iRes = TOK_ATTR_STRING; break;
case SPH_ATTR_JSON:
case SPH_ATTR_JSON_PTR:
case SPH_ATTR_JSON_FIELD:
case SPH_ATTR_JSON_FIELD_PTR: iRes = TOK_ATTR_JSON; break;
case SPH_ATTR_FACTORS: iRes = TOK_ATTR_FACTORS; break;
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
case SPH_ATTR_BIGINT:
case SPH_ATTR_TOKENCOUNT:
iRes = tCol.m_tLocator.IsBitfield() ? TOK_ATTR_BITS : TOK_ATTR_INT;
break;
default:
m_sLexerError.SetSprintf ( "attribute '%s' is of unsupported type (type=%d)", sTok, tCol.m_eAttrType );
return -1;
}
lvalp->iAttrLocator = sphPackAttrLocator ( tCol.m_tLocator, iAttr );
return iRes;
}
int ExprParser_t::ParseField ( int iField, const char* sTok, YYSTYPE * lvalp )
{
lvalp->iAttrLocator = iField;
return TOK_FIELD;
}
bool ExprParser_t::CheckGeodist ( YYSTYPE * lvalp )
{
int iGeodist = m_pSchema->GetAttrIndex ( "@geodist" );
if ( iGeodist==-1 )
return false;
const CSphAttrLocator & tLoc = m_pSchema->GetAttr ( iGeodist ).m_tLocator;
lvalp->iAttrLocator = sphPackAttrLocator ( tLoc, iGeodist );
return true;
}
void ExprParser_t::AddUservar ( const char* sBegin, int iLen, YYSTYPE * lvalp )
{
lvalp->iNode = m_dUservars.GetLength ();
CSphString sTok { sBegin, iLen };
m_dUservars.Add ( sTok );
}
int ExprParser_t::ParseAttrsAndFields ( const char * szTok, YYSTYPE * lvalp )
{
// check for attribute
int iCol = m_pSchema->GetAttrIndex ( szTok );
if ( iCol>=0 )
return ParseAttr ( iCol, szTok, lvalp );
// check for field
iCol = m_pSchema->GetFieldIndex ( szTok );
if ( iCol>=0 )
return ParseField ( iCol, szTok, lvalp );
return -1;
}
// process tokens starting with @
int ExprParser_t::ProcessAtRawToken ( const char * sBegin, int iLen, YYSTYPE * lvalp )
{
int iRes = -1;
if ( strncasecmp ( sBegin, "@id", iLen )==0 )
return ParseAttrsAndFields ( "id", lvalp );
else if ( strncasecmp ( sBegin, "@geodist", iLen )==0 )
{
iRes = ParseAttrsAndFields ( "@geodist", lvalp );
if (iRes<0)
m_sLexerError = "geoanchor is not set, @geodist expression unavailable";
return iRes;
} else if ( strncasecmp ( sBegin, "@weight", iLen )==0 )
return TOK_ATWEIGHT;
CSphString sTok { sBegin, iLen };
lvalp->iNode = m_dUservars.GetLength ();
m_dUservars.Add ( sTok );
return TOK_USERVAR;
}
inline static bool IsFunc ( Tokh_e e )
{
return e<FUNC_FUNCS_COUNT;
}
inline static bool IsTok ( Tokh_e e )
{
return e<TOKH_TOKH_COUNT && e>=FUNC_FUNCS_COUNT;
}
int ExprParser_t::CheckForFields ( Tokh_e eTok, YYSTYPE * lvalp )
{
if ( eTok==TOKH_COUNT ) // in case someone used 'count' as a name for an attribute
return ParseAttrsAndFields ("count", lvalp);
if ( eTok==TOKH_WEIGHT ) // in case someone used 'weight' as a name for an attribute
return ParseAttrsAndFields ("weight", lvalp);
return -1;
}
// general flow: the flex lexer does the most generic tokenization and provides a raw token.
// here we look it up in the schema, the perfect hash, overrides, UDFs etc. and return a concrete token
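// resolution order (matches the code below): perfect-hash tokens/operators; schema attributes
// and fields; the joined table name; @-prefixed tokens; the BM25F hook override; built-in
// functions; hook functions/identifiers; UDFs; and finally an arbitrary identifier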
int ExprParser_t::ProcessRawToken ( const char * sToken, int iLen, YYSTYPE * lvalp )
{
int iRes = -1;
const bool bFunc = lvalp->iTrailingBr!=0;
if ( lvalp->iTrailingBr==2 ) // trim trailing [ \t\n\r]
{
while ( sphIsSpace ( sToken[iLen-1] ) )
--iLen;
}
lvalp->iTrailingBr = 0;
auto eTok = TokHashLookup ( { sToken, iLen } );
if ( IsTok(eTok) )
{
if ( !bFunc )
{
iRes = CheckForFields ( eTok, lvalp );
if ( iRes>=0 )
return iRes;
}
return g_dHash2Op[eTok-FUNC_FUNCS_COUNT];
}
CSphString sTok;
sTok.SetBinary ( sToken, iLen );
sTok.ToLower ();
// check for attributes and fields
if ( !bFunc )
{
iRes = ParseAttrsAndFields ( sTok.cstr(), lvalp );
if ( iRes>=0 )
return iRes;
}
// check for table name
if ( m_pJoinIdx && *m_pJoinIdx==sTok )
{
CSphString sTokMixed { sToken, iLen };
m_dIdents.Add ( sTokMixed.Leak() );
lvalp->sIdent = m_dIdents.Last();
return TOK_TABLE_NAME;
}
if ( sToken[0]=='@' )
return ProcessAtRawToken ( sToken, iLen, lvalp );
// ask hook func, it may override
if ( m_pHook && eTok==FUNC_BM25F ) // tiny ad-hoc shortcut: BM25F is the only known override, so hardcode it
{
int iID = m_pHook->IsKnownFunc ( sTok.cstr () );
if ( iID>=0 )
{
lvalp->iFunc = iID;
return TOK_HOOK_FUNC;
}
}
// check for function
if ( IsFunc ( eTok ) )
{
lvalp->iFunc = eTok;
return g_dFuncs[eTok].m_iNodeType;
}
// ask hook ident
if ( m_pHook )
{
int iID = m_pHook->IsKnownFunc ( sTok.cstr () );
if ( iID>=0 )
{
lvalp->iFunc = iID;
return TOK_HOOK_FUNC;
}
iID = m_pHook->IsKnownIdent ( sTok.cstr () );
if ( iID>=0 )
{
lvalp->iNode = iID;
return TOK_HOOK_IDENT;
}
}
// check for UDF
auto pUdf = PluginGet<PluginUDF_c> ( PLUGIN_FUNCTION, sTok.cstr() );
if ( pUdf )
{
lvalp->iNode = m_dUdfCalls.GetLength();
m_dUdfCalls.Add ( new UdfCall_t() );
m_dUdfCalls.Last()->m_pUdf = std::move ( pUdf );
return TOK_UDF;
}
// arbitrary identifier, then
CSphString sTokMixed { sToken, iLen };
m_dIdents.Add ( sTokMixed.Leak() );
lvalp->sIdent = m_dIdents.Last();
return TOK_IDENT;
}
/// is add/sub?
static inline bool IsAddSub ( const ExprNode_t * pNode )
{
if ( pNode )
return pNode->m_iToken=='+' || pNode->m_iToken=='-';
assert ( 0 && "null node passed to IsAddSub()" );
return false;
}
/// is unary operator?
static inline bool IsUnary ( const ExprNode_t * pNode )
{
if ( pNode )
return pNode->m_iToken==TOK_NEG || pNode->m_iToken==TOK_NOT;
assert ( 0 && "null node passed to IsUnary() ");
return false;
}
/// is arithmetic?
static inline bool IsAri ( const ExprNode_t * pNode )
{
if ( pNode )
{
int iTok = pNode->m_iToken;
return iTok=='+' || iTok=='-' || iTok=='*' || iTok=='/';
}
assert ( 0 && "null node passed to IsAri()" );
return false;
}
/// is constant?
static inline bool IsConst ( const ExprNode_t * pNode )
{
if ( pNode )
return pNode->m_iToken==TOK_CONST_INT || pNode->m_iToken==TOK_CONST_FLOAT;
assert ( 0 && "null node passed to IsConst()" );
return false;
}
/// float value of a constant
static inline float FloatVal ( const ExprNode_t * pNode )
{
assert ( IsConst(pNode) );
return pNode->m_iToken==TOK_CONST_INT
? (float)pNode->m_iConst
: pNode->m_fConst;
}
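// post-order pass: rewrite (expr op const) into (const op expr) and promote constants one
// level up the tree, so that ConstantFoldPass below can merge them.
// e.g. "(a+2)+3" is canonized to "3+(2+a)", which ConstantFoldPass then folds to "5+a"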
void ExprParser_t::CanonizePass ( int iNode )
{
if ( iNode<0 )
return;
CanonizePass ( m_dNodes [ iNode ].m_iLeft );
CanonizePass ( m_dNodes [ iNode ].m_iRight );
ExprNode_t * pRoot = &m_dNodes [ iNode ];
ExprNode_t * pLeft = ( pRoot->m_iLeft>=0 ) ? &m_dNodes [ pRoot->m_iLeft ] : nullptr;
ExprNode_t * pRight = ( pRoot->m_iRight>=0 ) ? &m_dNodes [ pRoot->m_iRight ] : nullptr;
// canonize (expr op const), move const to the left
if ( pLeft && pRight && IsAri ( pRoot ) && !IsConst ( pLeft ) && IsConst ( pRight ) )
{
Swap ( pRoot->m_iLeft, pRoot->m_iRight );
Swap ( pLeft, pRight );
// fixup (expr-const) to ((-const)+expr)
if ( pRoot->m_iToken=='-' )
{
pRoot->m_iToken = '+';
if ( pLeft->m_iToken==TOK_CONST_INT )
pLeft->m_iConst *= -1;
else
pLeft->m_fConst *= -1;
}
// fixup (expr/const) to ((1/const)*expr)
if ( pRoot->m_iToken=='/' )
{
pRoot->m_iToken = '*';
pLeft->m_fConst = 1.0f / FloatVal ( pLeft );
pLeft->m_iToken = TOK_CONST_FLOAT;
}
}
// promote children constants
if ( pLeft && IsAri ( pRoot ) && IsAri ( pLeft ) && IsAddSub ( pLeft )==IsAddSub ( pRoot ) &&
IsConst ( &m_dNodes [ pLeft->m_iLeft ] ) )
{
// ((const op lr) op2 right) gets replaced with (const op (lr op2/op right))
// constant gets promoted one level up
int iConst = pLeft->m_iLeft;
int iCenter = pLeft->m_iRight;
int iRight = pRoot->m_iRight;
int iOpLeft = pLeft->m_iToken;
int iOp = pRoot->m_iToken;
int iOpNode = pRoot->m_iLeft;
// the swap here is necessary to keep the constraint (iRoot>iLeft) && (iRoot>iRight),
// which, in turn, makes it possible to create the tree sequentially instead of recursively.
Swap ( m_dNodes[iOpNode], m_dNodes[iRight] );
Swap ( iOpNode, iRight );
switch ( iOpLeft )
{
case '+':
case '*':
// (c + lr) op r -> c + (lr op r)
// (c * lr) op r -> c * (lr op r)
Swap ( iOpLeft, iOp );
break;
case '-':
// (c - lr) + r -> c - (lr - r)
// (c - lr) - r -> c - (lr + r)
iOpLeft = ( iOp=='+' ? '-' : '+' );
iOp = '-';
break;
case '/':
// (c / lr) * r -> c * (r / lr)
// (c / lr) / r -> c / (r * lr)
Swap ( iCenter, iRight );
iOpLeft = ( iOp=='*' ? '/' : '*' );
break;
default:
assert ( 0 && "internal error: unhandled op in left-const promotion" );
}
pRoot->m_iLeft = iConst;
pRoot->m_iRight = iOpNode;
m_dNodes[iOpNode].m_iLeft = iCenter;
m_dNodes[iOpNode].m_iRight = iRight;
m_dNodes[iOpNode].m_iToken = iOpLeft;
pRoot->m_iToken = iOp;
}
}
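// post-order pass: fold constant subexpressions (unary ops on constants, binary arithmetic
// over two constants, "(const op (const op2 expr))" chains, and unary math functions of a constant)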
void ExprParser_t::ConstantFoldPass ( int iNode )
{
if ( iNode<0 )
return;
ConstantFoldPass ( m_dNodes [ iNode ].m_iLeft );
ConstantFoldPass ( m_dNodes [ iNode ].m_iRight );
ExprNode_t * pRoot = &m_dNodes [ iNode ];
ExprNode_t * pLeft = ( pRoot->m_iLeft>=0 ) ? &m_dNodes [ pRoot->m_iLeft ] : nullptr;
ExprNode_t * pRight = ( pRoot->m_iRight>=0 ) ? &m_dNodes [ pRoot->m_iRight ] : nullptr;
// unary arithmetic expression with constant
if ( IsUnary ( pRoot ) && pLeft && IsConst ( pLeft ) )
{
if ( pLeft->m_iToken==TOK_CONST_INT )
{
switch ( pRoot->m_iToken )
{
case TOK_NEG: pRoot->m_iConst = -pLeft->m_iConst; break;
case TOK_NOT: pRoot->m_iConst = !pLeft->m_iConst; break;
default: assert ( 0 && "internal error: unhandled arithmetic token during const-int optimization" );
}
} else
{
switch ( pRoot->m_iToken )
{
case TOK_NEG: pRoot->m_fConst = -pLeft->m_fConst; break;
case TOK_NOT: pRoot->m_fConst = pLeft->m_fConst==0.0f; break;
default: assert ( 0 && "internal error: unhandled arithmetic token during const-float optimization" );
}
}
pRoot->m_iToken = pLeft->m_iToken;
pRoot->m_iLeft = -1;
pLeft->m_iToken = 0;
return;
}
// arithmetic expression with constants
if ( IsAri ( pRoot ) )
{
assert ( pLeft && pRight );
// optimize fully-constant expressions
if ( IsConst ( pLeft ) && IsConst ( pRight ) )
{
if ( pLeft->m_iToken==TOK_CONST_INT && pRight->m_iToken==TOK_CONST_INT && pRoot->m_iToken!='/' )
{
switch ( pRoot->m_iToken )
{
case '+': pRoot->m_iConst = pLeft->m_iConst + pRight->m_iConst; break;
case '-': pRoot->m_iConst = pLeft->m_iConst - pRight->m_iConst; break;
case '*': pRoot->m_iConst = pLeft->m_iConst * pRight->m_iConst; break;
default: assert ( 0 && "internal error: unhandled arithmetic token during const-int optimization" );
}
pRoot->m_iToken = TOK_CONST_INT;
} else
{
float fLeft = FloatVal ( pLeft );
float fRight = FloatVal ( pRight );
switch ( pRoot->m_iToken )
{
case '+': pRoot->m_fConst = fLeft + fRight; break;
case '-': pRoot->m_fConst = fLeft - fRight; break;
case '*': pRoot->m_fConst = fLeft * fRight; break;
case '/': pRoot->m_fConst = fRight!=0.0f ? fLeft / fRight : 0.0f; break; // FIXME! we don't have 'NULL', can't distinguish it from 0.0f
default: assert ( 0 && "internal error: unhandled arithmetic token during const-float optimization" );
}
pRoot->m_iToken = TOK_CONST_FLOAT;
}
pRoot->m_iLeft = -1;
pLeft->m_iToken = 0;
pRoot->m_iRight = -1;
pRight->m_iToken = 0;
return;
}
if ( IsConst ( pLeft ) && IsAri ( pRight ) && IsAddSub ( pRoot )==IsAddSub ( pRight ) &&
IsConst ( &m_dNodes[pRight->m_iLeft] ) )
{
ExprNode_t * pConst = &m_dNodes[pRight->m_iLeft];
assert ( !IsConst ( &m_dNodes [ pRight->m_iRight ] ) ); // must have been optimized already
// optimize (left op (const op2 expr)) to ((left op const) op*op2 expr)
if ( IsAddSub ( pRoot ) )
{
// fold consts
int iSign = ( ( pRoot->m_iToken=='+' ) ? 1 : -1 );
if ( pLeft->m_iToken==TOK_CONST_INT && pConst->m_iToken==TOK_CONST_INT )
{
pLeft->m_iConst += iSign*pConst->m_iConst;
} else
{
pLeft->m_iToken = TOK_CONST_FLOAT;
pLeft->m_fConst = FloatVal ( pLeft ) + iSign*FloatVal ( pConst );
}
// fold ops
pRoot->m_iToken = ( pRoot->m_iToken==pRight->m_iToken ) ? '+' : '-';
} else
{
// fold consts
if ( pRoot->m_iToken=='*' && pLeft->m_iToken==TOK_CONST_INT && pConst->m_iToken==TOK_CONST_INT )
{
pLeft->m_iConst *= pConst->m_iConst;
} else
{
if ( pRoot->m_iToken=='*' )
pLeft->m_fConst = FloatVal ( pLeft ) * FloatVal ( pConst );
else
pLeft->m_fConst = FloatVal ( pLeft ) / FloatVal ( pConst );
pLeft->m_iToken = TOK_CONST_FLOAT;
}
// fold ops
pRoot->m_iToken = ( pRoot->m_iToken==pRight->m_iToken ) ? '*' : '/';
}
// promote expr arg
pRoot->m_iRight = pRight->m_iRight;
pRight->m_iToken = 0;
}
}
// unary function from a constant
if ( pRoot->m_iToken==TOK_FUNC && g_dFuncs [ pRoot->m_iFunc ].m_iArgs==1 && IsConst ( pLeft ) )
{
float fArg = pLeft->m_iToken==TOK_CONST_FLOAT ? pLeft->m_fConst : float ( pLeft->m_iConst );
switch ( pRoot->m_iFunc )
{
case FUNC_ABS:
pRoot->m_iToken = pLeft->m_iToken;
pRoot->m_iLeft = -1;
if ( pLeft->m_iToken==TOK_CONST_INT )
pRoot->m_iConst = IABS ( pLeft->m_iConst );
else
pRoot->m_fConst = (float)fabs ( fArg );
pLeft->m_iToken = 0;
break;
case FUNC_CEIL: pRoot->m_iToken = TOK_CONST_INT; pRoot->m_iLeft = -1; pRoot->m_iConst = (int64_t)ceil ( fArg ); pLeft->m_iToken = 0; break;
case FUNC_FLOOR: pRoot->m_iToken = TOK_CONST_INT; pRoot->m_iLeft = -1; pRoot->m_iConst = (int64_t)floor ( fArg ); pLeft->m_iToken = 0; break;
case FUNC_SIN: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = float ( sin ( fArg) ); pLeft->m_iToken = 0; break;
case FUNC_COS: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = float ( cos ( fArg ) ); pLeft->m_iToken = 0; break;
case FUNC_LN: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = fArg>0.0f ? (float) log(fArg) : 0.0f; pLeft->m_iToken = 0; break;
case FUNC_LOG2: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = fArg>0.0f ? (float)( log(fArg)*M_LOG2E ) : 0.0f; pLeft->m_iToken = 0; break;
case FUNC_LOG10: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = fArg>0.0f ? (float)( log(fArg)*M_LOG10E ) : 0.0f; pLeft->m_iToken = 0; break;
case FUNC_EXP: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = float ( exp ( fArg ) ); pLeft->m_iToken = 0; break;
case FUNC_SQRT: pRoot->m_iToken = TOK_CONST_FLOAT; pRoot->m_iLeft = -1; pRoot->m_fConst = fArg>0.0f ? (float)sqrt(fArg) : 0.0f; pLeft->m_iToken = 0; break;
default: break;
}
return;
}
}
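// post-order pass with a few local rewrites: fuse a+b*c and a*b*c into MADD/MUL3,
// replace division by a float constant with multiplication by its reciprocal, and
// collapse SINT(int-attr) into a plain signed-int attribute access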
void ExprParser_t::VariousOptimizationsPass ( int iNode )
{
if ( iNode<0 )
return;
VariousOptimizationsPass ( m_dNodes [ iNode ].m_iLeft );
VariousOptimizationsPass ( m_dNodes [ iNode ].m_iRight );
ExprNode_t * pRoot = &m_dNodes [ iNode ];
int iLeft = pRoot->m_iLeft;
int iRight = pRoot->m_iRight;
ExprNode_t * pLeft = ( iLeft>=0 ) ? &m_dNodes [ iLeft ] : nullptr;
ExprNode_t * pRight = ( iRight>=0 ) ? &m_dNodes [ iRight ] : nullptr;
// madd, mul3
// FIXME! separate pass for these? otherwise (2+(a*b))+3 won't get const folding
if ( ( pRoot->m_iToken=='+' || pRoot->m_iToken=='*' ) && pLeft && pRight && ( pLeft->m_iToken=='*' || pRight->m_iToken=='*' ) )
{
if ( pLeft->m_iToken!='*' )
{
Swap ( pRoot->m_iLeft, pRoot->m_iRight );
Swap ( pLeft, pRight );
Swap ( iLeft, iRight );
}
pLeft->m_iToken = ',';
pRoot->m_iFunc = ( pRoot->m_iToken=='+' ) ? FUNC_MADD : FUNC_MUL3;
pRoot->m_iToken = TOK_FUNC;
return;
}
// division by a constant (replace with multiplication by inverse)
if ( pRoot->m_iToken=='/' && pRight && pRight->m_iToken==TOK_CONST_FLOAT )
{
pRight->m_fConst = 1.0f / pRight->m_fConst;
pRoot->m_iToken = '*';
return;
}
// SINT(int-attr)
if ( pRoot->m_iToken==TOK_FUNC && pRoot->m_iFunc==FUNC_SINT && pLeft )
{
if ( pLeft->m_iToken==TOK_ATTR_INT || pLeft->m_iToken==TOK_ATTR_BITS )
{
pRoot->m_iToken = TOK_ATTR_SINT;
pRoot->m_tLocator = pLeft->m_tLocator;
pRoot->m_iLeft = -1;
pLeft->m_iToken = 0;
}
}
}
static bool IsSupportedNEType ( int iType )
{
return iType==TOK_COLUMNAR_INT || iType==TOK_COLUMNAR_BIGINT || iType==TOK_COLUMNAR_BOOL || iType==TOK_ATTR_INT;
}
static bool CheckAndSwap ( ExprNode_t * & pLeft, ExprNode_t * & pRight )
{
if ( IsSupportedNEType ( pRight->m_iToken ) && pLeft->m_iToken==TOK_CONST_INT )
Swap ( pLeft, pRight );
return IsSupportedNEType ( pLeft->m_iToken ) && pRight->m_iToken==TOK_CONST_INT;
}
bool ExprParser_t::MultiNEMatch ( const ExprNode_t * pLeft, const ExprNode_t * pRight, ExprNode_t & tRes, CSphVector<int64_t> & dValues )
{
assert ( pLeft->m_iLeft!=-1 && pLeft->m_iRight!=-1 );
assert ( pRight->m_iLeft!=-1 && pRight->m_iRight!=-1 );
ExprNode_t * pLeft0 = &m_dNodes [ pLeft->m_iLeft ];
ExprNode_t * pLeft1 = &m_dNodes [ pLeft->m_iRight ];
ExprNode_t * pRight0 = &m_dNodes [ pRight->m_iLeft ];
ExprNode_t * pRight1 = &m_dNodes [ pRight->m_iRight ];
if ( !CheckAndSwap ( pLeft0, pLeft1 ) )
return false;
if ( !CheckAndSwap ( pRight0, pRight1 ) )
return false;
if ( pRight0->m_iLocator!=pLeft0->m_iLocator || pRight0->m_iToken!=pLeft0->m_iToken )
return false;
tRes = *pLeft0;
dValues.Add ( pLeft1->m_iConst );
dValues.Add ( pRight1->m_iConst );
return true;
}
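// transform "attr<>c1 AND attr<>c2" (both over the same attribute) into "NOT attr IN (c1,c2)"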
bool ExprParser_t::TransformNENE ( ExprNode_t * pRoot, ExprNode_t * pLeft, ExprNode_t * pRight )
{
assert ( pRoot && pLeft && pRight );
assert ( pRoot->m_iToken==TOK_AND && pLeft->m_iToken==TOK_NE && pRight->m_iToken==TOK_NE );
ExprNode_t tRes;
CSphVector<int64_t> dValues;
if ( MultiNEMatch ( pLeft, pRight, tRes, dValues ) )
{
pRoot->m_iToken = TOK_NOT;
pRoot->m_iRight = -1;
// discard optimized tokens
pRight->m_iToken = 0;
m_dNodes[pRight->m_iLeft].m_iToken = 0;
m_dNodes[pRight->m_iRight].m_iToken = 0;
pLeft->m_iToken = TOK_FUNC;
pLeft->m_iFunc = FUNC_IN;
ExprNode_t * pLeft0 = &m_dNodes [ pLeft->m_iLeft ];
ExprNode_t * pLeft1 = &m_dNodes [ pLeft->m_iRight ];
pLeft0->m_iToken = tRes.m_iToken;
pLeft0->m_iLocator = tRes.m_iLocator;
pLeft0->m_tLocator = tRes.m_tLocator;
pLeft1->m_iToken = TOK_CONST_LIST;
pLeft1->m_pConsts = new ConstList_c();
for ( auto i : dValues )
pLeft1->m_pConsts->Add(i);
return true;
}
return false;
}
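// fold one more "attr<>c" into an already-built "NOT attr IN (...)" over the same attribute,
// i.e. "NOT attr IN (c1,c2) AND attr<>c3" becomes "NOT attr IN (c1,c2,c3)"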
bool ExprParser_t::TransformInNE ( ExprNode_t * pRoot, ExprNode_t * pLeft, ExprNode_t * pRight )
{
assert ( pRoot && pLeft && pRight );
assert ( pRoot->m_iToken==TOK_AND && ( ( pLeft->m_iToken==TOK_NOT && pRight->m_iToken==TOK_NE ) || ( pLeft->m_iToken==TOK_NE && pRight->m_iToken==TOK_NOT ) ) );
ExprNode_t * pNotNode = pLeft->m_iToken==TOK_NOT ? pLeft : pRight;
ExprNode_t * pNENode = pLeft->m_iToken==TOK_NE ? pLeft : pRight;
assert ( pNotNode->m_iLeft!=-1 && pNotNode->m_iRight==-1 );
ExprNode_t * pInNode = &m_dNodes [ pNotNode->m_iLeft ];
bool bCond = pInNode->m_iToken==TOK_FUNC && pInNode->m_iFunc==FUNC_IN;
assert ( pInNode->m_iLeft!=-1 && pInNode->m_iRight!=-1 ); // both IN() children must exist
ExprNode_t * pIn0 = &m_dNodes [ pInNode->m_iLeft ];
ExprNode_t * pIn1 = &m_dNodes [ pInNode->m_iRight ];
bCond &= IsSupportedNEType ( pIn0->m_iToken ) && pIn1->m_iToken==TOK_CONST_LIST;
bCond = bCond && ( pIn1->m_pConsts->m_eRetType==SPH_ATTR_INTEGER || pIn1->m_pConsts->m_eRetType==SPH_ATTR_BIGINT ); // short-circuit: m_pConsts is only valid on TOK_CONST_LIST nodes
ExprNode_t * pNE0 = &m_dNodes [ pNENode->m_iLeft ];
ExprNode_t * pNE1 = &m_dNodes [ pNENode->m_iRight ];
bCond &= CheckAndSwap ( pNE0, pNE1 );
bCond &= pNE0->m_iToken == pIn0->m_iToken && pNE0->m_iLocator==pIn0->m_iLocator;
if ( bCond )
{
pIn1->m_pConsts->Add ( pNE1->m_iConst );
*pRoot = *pNotNode;
// discard optimized tokens
pNotNode->m_iToken = pNENode->m_iToken = pNE0->m_iToken = pNE1->m_iToken = 0;
return true;
}
return false;
}
// transform "var<>1 AND var<>2 AND var<>3" into "not var in (1,2,3)"
void ExprParser_t::MultiNEPass ( int iNode )
{
if ( iNode<0 )
return;
MultiNEPass ( m_dNodes [ iNode ].m_iLeft );
MultiNEPass ( m_dNodes [ iNode ].m_iRight );
ExprNode_t * pRoot = &m_dNodes[iNode];
if ( pRoot->m_iLeft==-1 || pRoot->m_iRight==-1 )
return;
ExprNode_t * pLeft = &m_dNodes [ pRoot->m_iLeft ];
ExprNode_t * pRight = &m_dNodes [ pRoot->m_iRight ];
if ( pRoot->m_iToken==TOK_AND && pLeft->m_iToken==TOK_NE && pRight->m_iToken==TOK_NE )
{
if ( TransformNENE ( pRoot, pLeft, pRight ) )
return;
}
if ( pRoot->m_iToken==TOK_AND && ( ( pLeft->m_iToken==TOK_NOT && pRight->m_iToken==TOK_NE ) || ( pLeft->m_iToken==TOK_NE && pRight->m_iToken==TOK_NOT ) ) )
{
if ( TransformInNE ( pRoot, pLeft, pRight ) )
return;
}
}
static const char * TokName (int iTok, int iFunc)
{
if ( iTok<256 )
{
if ( iTok<0 )
return "deleted_token_fixme";
static char onechar[2] = { 0 };
onechar[0] = char(iTok);
return onechar;
}
switch ( (yytokentype) iTok)
{
case TOK_CONST_INT: return "const_int";
case TOK_CONST_FLOAT: return "const_float";
case TOK_CONST_STRING: return "const_string";
case TOK_SUBKEY: return "subkey";
case TOK_DOT_NUMBER: return "dot_number";
case TOK_ATTR_INT: return "attr_int";
case TOK_ATTR_BITS: return "attr_bits";
case TOK_ATTR_FLOAT: return "attr_float";
case TOK_ATTR_MVA32: return "attr_mva32";
case TOK_ATTR_MVA64: return "attr_mva64";
case TOK_ATTR_STRING: return "attr_string";
case TOK_ATTR_FACTORS: return "attr_factors";
case TOK_IF: return "if";
case TOK_FUNC: return FuncNameByHash ( iFunc );
case TOK_FUNC_IN: return "func_in";
case TOK_FUNC_RAND: return "func_rand";
case TOK_FUNC_REMAP: return "func_remap";
case TOK_FUNC_PF: return "func_pf";
case TOK_FUNC_JA: return "func_ja";
case TOK_FUNC_DATE: return "func_date";
case TOK_USERVAR: return "uservar";
case TOK_UDF: return "udf";
case TOK_HOOK_IDENT: return "hook_ident";
case TOK_HOOK_FUNC: return "hook_func";
case TOK_IDENT: return "ident";
case TOK_ATTR_JSON: return "attr_json";
case TOK_FIELD: return "field";
case TOK_COLUMNAR_INT: return "columnar_int";
case TOK_COLUMNAR_TIMESTAMP: return "columnar_timestamp";
case TOK_COLUMNAR_BIGINT: return "columnar_bigint";
case TOK_COLUMNAR_BOOL: return "columnar_bool";
case TOK_COLUMNAR_FLOAT: return "columnar_float";
case TOK_COLUMNAR_STRING: return "columnar_string";
case TOK_COLUMNAR_UINT32SET: return "columnar_uint32set";
case TOK_COLUMNAR_INT64SET: return "columnar_int64set";
case TOK_COLUMNAR_FLOATVEC: return "columnar_floatvec";
case TOK_ATWEIGHT: return "atweight";
case TOK_GROUPBY: return "groupby";
case TOK_WEIGHT: return "weight";
case TOK_COUNT: return "count";
case TOK_DISTINCT: return "distinct";
case TOK_CONST_LIST: return "const_list";
case TOK_ATTR_SINT: return "attr_sint";
case TOK_MAP_ARG: return "map_arg";
case TOK_FOR: return "for";
case TOK_ITERATOR: return "iterator";
case TOK_IS: return "is";
case TOK_NULL: return "null";
case TOK_IS_NULL: return "is_null";
case TOK_IS_NOT_NULL: return "is_not_null";
case TOK_OR: return "or";
case TOK_AND: return "and";
case TOK_NE: return "!=";
case TOK_EQ: return "=";
case TOK_GTE: return ">=";
case TOK_LTE: return "<=";
case TOK_MOD: return "mod";
case TOK_DIV: return "div";
case TOK_NOT: return "not";
case TOK_NEG: return "neg";
default: return "Unknown_need_to_fix";
}
}
// debug dump
static void Dump ( int iNode, const VecTraits_T<ExprNode_t>& dNodes, StringBuilder_c& tOut )
{
if ( iNode<0 )
return;
ExprNode_t & tNode = dNodes[iNode];
switch ( tNode.m_iToken )
{
case TOK_CONST_INT:
tOut << tNode.m_iConst;
break;
case TOK_CONST_FLOAT:
tOut << tNode.m_fConst;
break;
case TOK_ATTR_INT:
case TOK_ATTR_SINT:
tOut << "row[" << tNode.m_tLocator.m_iBitOffset / 32 << "]";
break;
default:
tOut << "(";
Dump ( tNode.m_iLeft, dNodes, tOut );
tOut << " ";
if ( tNode.m_iToken<256 )
tOut.RawC ( (char) tNode.m_iToken);
else
tOut << TokName (tNode.m_iToken,tNode.m_iFunc);
tOut << " ";
Dump ( tNode.m_iRight, dNodes, tOut );
tOut << ")";
break;
}
}
void ExprParser_t::Dump ( int iNode )
{
if ( iNode<0 )
return;
StringBuilder_c tOut;
::Dump ( iNode, m_dNodes, tOut );
printf ("%s\n", tOut.cstr());
}
static void PrintArrow ( StringBuilder_c & tRes, CSphVector<int>& dPref, int iFrom, int iTo, const char* szSuff )
{
const char* szColor = ( iTo>iFrom ) ? " color=red": "";
dPref.Add ( tRes.GetLength () );
tRes.Sprintf ( "_%d%s->", iFrom, szSuff );
dPref.Add ( tRes.GetLength () );
tRes.Sprintf ( "_%d[label=%d%s]\n", iTo, iTo, szColor );
// 10:l -> 6 [label=6]
}
static void DumpNode2Dot (StringBuilder_c& tRes, CSphVector<int>& dPref, const VecTraits_T<ExprNode_t>& dNodes, int iNode )
{
if ( iNode<0 )
return;
ExprNode_t & tNode = dNodes[iNode];
if ( tNode.m_iToken<=0 )
return;
dPref.Add ( tRes.GetLength () );
tRes << "_" << iNode; // node num
switch ( tNode.m_iToken )
{
case TOK_CONST_INT:
tRes.Sprintf ( "[shape=circle label=%d]\n", tNode.m_iConst );
break;
case TOK_CONST_FLOAT:
tRes.Sprintf ( "[shape=circle label=%f]\n", tNode.m_fConst );
break;
case TOK_ATTR_INT:
case TOK_ATTR_SINT:
tRes.Sprintf ( "[shape=oval label=row_%d]\n", tNode.m_tLocator.m_iBitOffset / 32 );
break;
default:
tRes << "[label=\"";
if ( tNode.m_iLeft>=0 )
tRes << "<l>|";
tRes << TokName ( tNode.m_iToken, tNode.m_iFunc );
if ( tNode.m_iRight>=0 )
tRes << "|<r>";
tRes << "\"]\n";
break;
}
if ( tNode.m_iLeft>=0 )
PrintArrow ( tRes, dPref, iNode, tNode.m_iLeft, ":l" );
if ( tNode.m_iRight>=0 )
PrintArrow ( tRes, dPref, iNode, tNode.m_iRight, ":r" );
}
static void DumpTree2Dot ( StringBuilder_c& tRes, CSphVector<int>& dPref, const VecTraits_T<ExprNode_t>& dNodes, int iRoot )
{
// use https://dreampuf.github.io/GraphvizOnline to visualize the graph
// note that this is NOT recursive, so no stack overflow is expected.
ARRAY_CONSTFOREACH ( i, dNodes )
DumpNode2Dot ( tRes, dPref, dNodes, i );
// output header (root node and pointer)
tRes << "root";
dPref.Add ( tRes.GetLength () );
tRes << "_[shape=invhouse label=root]\nroot";
dPref.Add ( tRes.GetLength () );
tRes << "_->";
dPref.Add ( tRes.GetLength () );
tRes << "_" << iRoot << "[label=" << iRoot << "]";
}
using StrWithPlaces_t = std::pair<CSphString, CSphVector<int>>;
static void Render2Dot ( StrWithPlaces_t& tRes, VecTraits_T<ExprNode_t>& dNodes, int iRoot )
{
auto & dPlaces = tRes.second;
StringBuilder_c sDot;
DumpTree2Dot ( sDot, dPlaces, dNodes, iRoot );
sDot.MoveTo ( tRes.first );
}
using NamedDot = std::pair<CSphString, StrWithPlaces_t>;
static void RenderAndAddWithName ( NamedDot & tOut, VecTraits_T<ExprNode_t> & dNodes, int iRoot, const char* szName )
{
tOut.first = szName;
Render2Dot ( tOut.second, dNodes, iRoot );
}
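// the positions recorded in tRes.second point at the '_' placeholders of node names;
// overwriting them with a per-cluster letter makes node IDs unique across subgraphs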
static void PlacePrefix ( StrWithPlaces_t & tRes, const char* sPrefix )
{
char c = *sPrefix;
auto * sRes = const_cast<char *> (tRes.first.cstr ());
tRes.second.Apply ( [sRes, c] ( int i ) { sRes[i] = c; } );
}
static CSphString Dots2String ( CSphVector<NamedDot>& dDots )
{
static const BYTE uPREFIXES = 8;
static const char* dPrefixes[uPREFIXES] = {"a", "b", "c", "d", "e", "f", "g", "h"};
int iPrefix = 0;
StringBuilder_c tOut;
tOut << "digraph A {\nedge[fontsize=9]\nnode[shape=record]\ncolor=blue\n";
int j = 0;
for ( int i = 1; i<dDots.GetLength (); ++i )
{
if ( dDots[i].second.first == dDots[j].second.first )
continue;
tOut << "subgraph cluster_" << dPrefixes[iPrefix] << " {\nlabel=\"" << dDots[j].first << "\"\n";
PlacePrefix ( dDots[j].second, dPrefixes[iPrefix] );
tOut << dDots[j].second.first << "\n}\n";
++iPrefix;
j = i;
}
tOut << "subgraph cluster_" << dPrefixes[iPrefix] << " {\nlabel=\"" << dDots[j].first << "\"\n";
PlacePrefix ( dDots[j].second, dPrefixes[iPrefix] );
tOut << dDots[j].second.first << "\n}\n";
tOut << "}";
CSphString sResult;
tOut.MoveTo(sResult);
return sResult;
}
alignas ( 128 ) static const BYTE g_UrlEncodeTable[] = { // 0 if need escape, 1 if not
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, // -.
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, // 0123456789
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // ABCDEFGHIJKLMNO
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, // PQRSTUVWXYZ_
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // abcdefghijklmno
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, // pqrstuvwxyz~
};
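// percent-encode everything except the RFC 3986 unreserved characters (A-Z a-z 0-9 - . _ ~)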
static CSphString UrlEncode ( const CSphString& sSource )
{
StringBuilder_c sRes;
for ( const auto* pC = sSource.cstr (); *pC; ++pC )
{
auto c = (BYTE)*pC;
if ( ( c & 0x80 ) || !g_UrlEncodeTable[c] )
sRes.Sprintf("%%%02x",c);
else
sRes.RawC((char)c);
}
CSphString sResult;
sRes.MoveTo (sResult);
return sResult;
}
/// optimize subtree
void ExprParser_t::Optimize ( int iNode )
{
auto eProfile = session::GetProfile();
if ( eProfile==Profile_e::DOTEXPR || eProfile==Profile_e::DOTEXPRURL )
{
// FIXME! maybe repeat iteratively while something changes?
CSphVector<NamedDot> dDots;
RenderAndAddWithName ( dDots.Add (), m_dNodes, m_iParsed, "Raw (non-optimized)" );
CanonizePass ( iNode );
RenderAndAddWithName ( dDots.Add (), m_dNodes, m_iParsed, "CanonizePass" );
ConstantFoldPass ( iNode );
RenderAndAddWithName ( dDots.Add (), m_dNodes, m_iParsed, "ConstantFoldPass" );
VariousOptimizationsPass ( iNode );
RenderAndAddWithName ( dDots.Add (), m_dNodes, m_iParsed, "VariousOptimizationsPass" );
MultiNEPass ( iNode );
RenderAndAddWithName ( dDots.Add (), m_dNodes, m_iParsed, "MultiNEPass" );
auto sDot = Dots2String ( dDots );
dDots.Reset();
StringBuilder_c tOut;
tOut << "Expr was: " << m_sExpr << "\n";
if ( eProfile==Profile_e::DOTEXPR )
tOut << sDot;
else
tOut << "https://dreampuf.github.io/GraphvizOnline/#" << UrlEncode(sDot);
printf ( "%s\n", tOut.cstr () );
fflush ( stdout );
return;
}
CanonizePass ( iNode );
ConstantFoldPass ( iNode );
VariousOptimizationsPass ( iNode );
MultiNEPass ( iNode );
}
/// fold arglist into array
/// also moves ownership (so the 1st param is owned by dArgs on exit)
void MoveToArgList ( ISphExpr * pLeft, VecRefPtrs_t<ISphExpr*> &dArgs )
{
if ( !pLeft || !pLeft->IsArglist ())
{
dArgs.Add ( pLeft );
return;
}
auto * pArgs = (Expr_Arglist_c *)pLeft;
if ( dArgs.IsEmpty () )
dArgs.SwapData ( pArgs->m_dArgs );
else {
dArgs.Append ( pArgs->m_dArgs );
pArgs->m_dArgs.Reset();
}
SafeRelease ( pArgs );
}
using UdfInt_fn = sphinx_int64_t ( * ) ( SPH_UDF_INIT *, SPH_UDF_ARGS *, char * );
using UdfDouble_fn = double ( * ) ( SPH_UDF_INIT *, SPH_UDF_ARGS *, char * );
using UdfCharptr_fn = char * ( * ) ( SPH_UDF_INIT *, SPH_UDF_ARGS *, char * );
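// base evaluator for UDF calls: owns the UdfCall_t, materializes evaluated arguments into
// the plain C SPH_UDF_ARGS struct before each call, and runs the UDF deinit hook on destruction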
class Expr_Udf_c : public ISphExpr
{
public:
explicit Expr_Udf_c ( UdfCall_t * pCall, QueryProfile_c * pProfiler )
: m_pCall ( pCall )
, m_pProfiler ( pProfiler )
{
SPH_UDF_ARGS & tArgs = m_pCall->m_tArgs;
assert ( tArgs.arg_values==nullptr );
tArgs.arg_values = new char * [ tArgs.arg_count ];
tArgs.str_lengths = new int [ tArgs.arg_count ];
m_dArgvals.Resize ( tArgs.arg_count );
ARRAY_FOREACH ( i, m_dArgvals )
tArgs.arg_values[i] = (char*) &m_dArgvals[i];
}
~Expr_Udf_c () override
{
if ( m_pCall->m_pUdf->m_fnDeinit )
m_pCall->m_pUdf->m_fnDeinit ( &m_pCall->m_tInit );
SafeDelete ( m_pCall );
}
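// evaluate all argument expressions for the given match and expose the results through
// SPH_UDF_ARGS; note the JSON case formats the field into a leaked buffer, which is
// presumably released later via FreeArgs() (assuming the arg index is in m_dArgs2Free)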
void FillArgs ( const CSphMatch & tMatch ) const
{
uint64_t uPacked = 0;
ESphJsonType eJson = JSON_NULL;
uint64_t uOff = 0;
CSphVector<BYTE> dTmp;
// FIXME? a cleaner way to reinterpret?
SPH_UDF_ARGS & tArgs = m_pCall->m_tArgs;
ARRAY_FOREACH ( i, m_dArgs )
{
switch ( tArgs.arg_types[i] )
{
case SPH_UDF_TYPE_UINT32: *(DWORD*)&m_dArgvals[i] = m_dArgs[i]->IntEval ( tMatch ); break;
case SPH_UDF_TYPE_INT64: m_dArgvals[i] = m_dArgs[i]->Int64Eval ( tMatch ); break;
case SPH_UDF_TYPE_FLOAT: *(float*)&m_dArgvals[i] = m_dArgs[i]->Eval ( tMatch ); break;
case SPH_UDF_TYPE_STRING: tArgs.str_lengths[i] = m_dArgs[i]->StringEval ( tMatch, (const BYTE**)&tArgs.arg_values[i] ); break;
case SPH_UDF_TYPE_UINT32SET:
case SPH_UDF_TYPE_INT64SET:
{
auto dMva = m_dArgs[i]->MvaEval ( tMatch );
tArgs.arg_values[i] = (char*)const_cast<BYTE*>(dMva.first);
tArgs.str_lengths[i] = dMva.second / (( tArgs.arg_types[i]==SPH_UDF_TYPE_UINT32SET ) ? sizeof(DWORD) : sizeof(int64_t));
break;
}
case SPH_UDF_TYPE_FACTORS:
tArgs.arg_values[i] = (char *)const_cast<BYTE*>( m_dArgs[i]->FactorEval(tMatch) );
break;
case SPH_UDF_TYPE_JSON:
uPacked = m_dArgs[i]->Int64Eval ( tMatch );
eJson = sphJsonUnpackType ( uPacked );
uOff = sphJsonUnpackOffset ( uPacked );
if ( !uOff || eJson==JSON_NULL )
{
tArgs.arg_values[i] = nullptr;
tArgs.str_lengths[i] = 0;
} else
{
JsonEscapedBuilder sTmp;
sphJsonFieldFormat ( sTmp, m_pBlobPool+uOff, eJson, false );
tArgs.str_lengths[i] = sTmp.GetLength();
tArgs.arg_values[i] = (char*) sTmp.Leak();
}
break;
default: assert ( 0 ); m_dArgvals[i] = 0; break;
}
}
}
void FreeArgs() const
{
assert ( !m_pCall->m_dArgs2Free.GetLength() || m_pCall->m_tArgs.arg_count ); // if anything is queued for freeing, there must be args
for ( int iAttr : m_pCall->m_dArgs2Free )
SafeDeleteArray ( m_pCall->m_tArgs.arg_values[iAttr] );
}
void AdoptArgs ( ISphExpr * pArglist )
{
MoveToArgList ( pArglist, m_dArgs );
}
CSphVector<ISphExpr*> & GetArgs()
{
return m_dArgs;
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
for ( auto i : m_dArgs )
i->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
switch ( eCmd )
{
case SPH_EXPR_GET_UDF:
*((bool*)pArg) = true;
return;
case SPH_EXPR_GET_STATEFUL_UDF:
if ( m_pCall && m_pCall->m_tInit.func_data )
*((bool*)pArg) = true;
return;
case SPH_EXPR_SET_BLOB_POOL:
m_pBlobPool = (const BYTE*)pArg;
break;
default:
break;
}
for ( auto & pExpr : m_dArgs )
pExpr->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) override
{
bDisable = true;
return 0;
}
protected:
UdfCall_t * m_pCall {nullptr};
VecRefPtrs_t<ISphExpr*> m_dArgs;
mutable CSphVector<int64_t> m_dArgvals;
const BYTE * m_pBlobPool {nullptr};
mutable char m_bError {0};
QueryProfile_c * m_pProfiler {nullptr};
Expr_Udf_c ( const Expr_Udf_c& rhs )
: m_pCall ( new UdfCall_t )
, m_pProfiler ( rhs.m_pProfiler )
{
assert ( !rhs.m_pCall->m_tInit.func_data );
m_pBlobPool = rhs.m_pBlobPool;
m_pCall->m_pUdf = rhs.m_pCall->m_pUdf;
m_pCall->m_dArgs2Free = rhs.m_pCall->m_dArgs2Free;
SPH_UDF_ARGS & tArgs = m_pCall->m_tArgs;
const SPH_UDF_ARGS & tSrcArgs = rhs.m_pCall->m_tArgs;
tArgs.arg_count = tSrcArgs.arg_count;
m_dArgs.Resize ( tArgs.arg_count );
ARRAY_FOREACH ( i, m_dArgs )
m_dArgs[i] = SafeClone ( rhs.m_dArgs[i] );
tArgs.arg_types = new sphinx_udf_argtype [ tArgs.arg_count ];
memcpy ( tArgs.arg_types, tSrcArgs.arg_types, sizeof ( sphinx_udf_argtype ) * tArgs.arg_count );
tArgs.arg_values = new char * [tArgs.arg_count];
tArgs.str_lengths = new int[tArgs.arg_count];
m_dArgvals.Resize ( tArgs.arg_count );
ARRAY_FOREACH ( i, m_dArgvals )
tArgs.arg_values[i] = (char *) &m_dArgvals[i];
}
};
class Expr_UdfInt_c : public Expr_Udf_c
{
public:
explicit Expr_UdfInt_c ( UdfCall_t * pCall, QueryProfile_c * pProfiler )
: Expr_Udf_c ( pCall, pProfiler )
{
assert ( IsInt ( pCall->m_pUdf->m_eRetType ) );
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
if ( m_bError )
return 0;
CSphScopedProfile tProf ( m_pProfiler, SPH_QSTATE_EVAL_UDF );
FillArgs ( tMatch );
auto pFn = (UdfInt_fn) m_pCall->m_pUdf->m_fnFunc;
auto iRes = (int64_t) pFn ( &m_pCall->m_tInit, &m_pCall->m_tArgs, &m_bError );
FreeArgs();
return iRes;
}
int IntEval ( const CSphMatch & tMatch ) const final { return (int) Int64Eval ( tMatch ); }
float Eval ( const CSphMatch & tMatch ) const final { return (float) Int64Eval ( tMatch ); }
ISphExpr * Clone () const final
{
return new Expr_UdfInt_c ( *this );
}
private:
Expr_UdfInt_c ( const Expr_UdfInt_c& ) = default;
};
class Expr_UdfFloat_c : public Expr_Udf_c
{
public:
explicit Expr_UdfFloat_c ( UdfCall_t * pCall, QueryProfile_c * pProfiler )
: Expr_Udf_c ( pCall, pProfiler )
{
assert ( pCall->m_pUdf->m_eRetType==SPH_ATTR_FLOAT );
m_pFn = (UdfDouble_fn)m_pCall->m_pUdf->m_fnFunc;
}
float Eval ( const CSphMatch & tMatch ) const final
{
if ( m_bError )
return 0;
CSphScopedProfile tProf ( m_pProfiler, SPH_QSTATE_EVAL_UDF );
FillArgs ( tMatch );
assert ( m_pFn == m_pCall->m_pUdf->m_fnFunc );
auto fRes = (float)m_pFn ( &m_pCall->m_tInit, &m_pCall->m_tArgs, &m_bError );
FreeArgs();
return fRes;
}
int IntEval ( const CSphMatch & tMatch ) const final { return (int) Eval ( tMatch ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t) Eval ( tMatch ); }
ISphExpr * Clone () const final
{
return new Expr_UdfFloat_c ( *this );
}
private:
Expr_UdfFloat_c ( const Expr_UdfFloat_c& ) = default;
UdfDouble_fn m_pFn; // to avoid dereference on each Eval() call
};
class Expr_UdfStringptr_c : public Expr_Udf_c
{
public:
explicit Expr_UdfStringptr_c ( UdfCall_t * pCall, QueryProfile_c * pProfiler )
: Expr_Udf_c ( pCall, pProfiler )
{
assert ( pCall->m_pUdf->m_eRetType==SPH_ATTR_STRINGPTR );
}
float Eval ( const CSphMatch & ) const final
{
assert ( 0 && "internal error: stringptr udf evaluated as float" );
return 0.0f;
}
int IntEval ( const CSphMatch & ) const final
{
assert ( 0 && "internal error: stringptr udf evaluated as int" );
return 0;
}
int64_t Int64Eval ( const CSphMatch & ) const final
{
assert ( 0 && "internal error: stringptr udf evaluated as bigint" );
return 0;
}
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final
{
if ( m_bError )
{
*ppStr = nullptr;
return 0;
}
CSphScopedProfile tProf ( m_pProfiler, SPH_QSTATE_EVAL_UDF );
FillArgs ( tMatch );
auto pFn = (UdfCharptr_fn) m_pCall->m_pUdf->m_fnFunc;
char * pRes = pFn ( &m_pCall->m_tInit, &m_pCall->m_tArgs, &m_bError ); // owned now!
*ppStr = (const BYTE*) pRes;
int iLen = ( pRes ?(int) strlen(pRes) : 0 );
FreeArgs();
return iLen;
}
bool IsDataPtrAttr() const final
{
return true;
}
ISphExpr * Clone () const final
{
return new Expr_UdfStringptr_c ( *this );
}
private:
Expr_UdfStringptr_c ( const Expr_UdfStringptr_c& ) = default;
};
ISphExpr * ExprParser_t::CreateUdfNode ( int iCall, ISphExpr * pLeft )
{
if ( !CheckStoredArg(pLeft) )
return nullptr;
Expr_Udf_c * pRes = nullptr;
switch ( m_dUdfCalls[iCall]->m_pUdf->m_eRetType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_BIGINT:
pRes = new Expr_UdfInt_c ( m_dUdfCalls[iCall], m_pProfiler );
break;
case SPH_ATTR_FLOAT:
pRes = new Expr_UdfFloat_c ( m_dUdfCalls[iCall], m_pProfiler );
break;
case SPH_ATTR_STRINGPTR:
pRes = new Expr_UdfStringptr_c ( m_dUdfCalls[iCall], m_pProfiler );
break;
default:
m_sCreateError.SetSprintf ( "internal error: unhandled type %d in CreateUdfNode()", m_dUdfCalls[iCall]->m_pUdf->m_eRetType );
break;
}
if ( pRes )
{
SafeAddRef ( pLeft );
if ( pLeft )
pRes->AdoptArgs ( pLeft );
m_dUdfCalls[iCall] = nullptr; // evaluator owns it now
}
return pRes;
}
CSphString ExprParser_t::GetNameByLocator ( int iNode ) const
{
return GetNameByLocator ( m_dNodes[iNode] );
}
CSphString ExprParser_t::GetNameByLocator ( const ExprNode_t & tNode ) const
{
int iLocator = tNode.m_iLocator;
if ( iLocator==-1 )
return "";
return m_pSchema->GetAttr(iLocator).m_sName;
}
ISphExpr * ExprParser_t::CreateExistNode ( const ExprNode_t & tNode )
{
assert ( m_dNodes[tNode.m_iLeft].m_iToken==',' );
int iAttrName = m_dNodes[tNode.m_iLeft].m_iLeft;
int iAttrDefault = m_dNodes[tNode.m_iLeft].m_iRight;
assert ( iAttrName>=0 && iAttrName<m_dNodes.GetLength()
&& iAttrDefault>=0 && iAttrDefault<m_dNodes.GetLength() );
auto iNameStart = GetConstStrOffset ( m_dNodes[iAttrName] );
auto iNameLen = GetConstStrLength ( m_dNodes[iAttrName] );
// skip leading and trailing non-attribute-name symbols (quotes and spaces)
const char* sExpr = m_sExpr.first;
while ( sExpr[iNameStart]!='\0' && ( sExpr[iNameStart]=='\'' || sExpr[iNameStart]==' ' ) && iNameLen )
{
iNameStart++;
--iNameLen;
}
while ( sExpr[iNameStart+iNameLen-1]!='\0'
&& ( sExpr[iNameStart+iNameLen-1]=='\'' || sExpr[iNameStart+iNameLen-1]==' ' )
&& iNameLen )
{
--iNameLen;
}
if ( iNameLen<=0 )
{
m_sCreateError.SetSprintf ( "first EXIST() argument must be valid string" );
return nullptr;
}
assert ( iNameStart>=0 && iNameLen>0 && iNameStart+iNameLen<=m_sExpr.second );
CSphString sAttr ( sExpr+iNameStart, iNameLen );
sphColumnToLowercase ( const_cast<char *>( sAttr.cstr() ) );
int iLoc = m_pSchema->GetAttrIndex ( sAttr.cstr() );
if ( iLoc>=0 )
{
const CSphColumnInfo & tCol = m_pSchema->GetAttr ( iLoc );
if ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET || tCol.m_eAttrType==SPH_ATTR_STRING )
{
m_sCreateError = "MVA and STRING in EXIST() prohibited";
return nullptr;
}
bool bColumnar = tCol.IsColumnar();
bool bStored = tCol.m_uAttrFlags & CSphColumnInfo::ATTR_STORED;
const CSphAttrLocator & tLoc = tCol.m_tLocator;
if ( tNode.m_eRetType==SPH_ATTR_FLOAT )
{
if ( bColumnar )
return CreateExpr_GetColumnarFloat ( tCol.m_sName, bStored );
else
return new Expr_GetFloat_c ( tLoc, tCol.m_sName );
}
else
{
if ( bColumnar )
return CreateExpr_GetColumnarInt ( tCol.m_sName, bStored );
else
return new Expr_GetInt_c ( tLoc, tCol.m_sName );
}
} else
{
if ( tNode.m_eRetType==SPH_ATTR_INTEGER )
return new Expr_GetIntConst_c ( (int)m_dNodes[iAttrDefault].m_iConst );
else if ( tNode.m_eRetType==SPH_ATTR_BIGINT )
return new Expr_GetInt64Const_c ( m_dNodes[iAttrDefault].m_iConst );
else
return new Expr_GetConst_c ( m_dNodes[iAttrDefault].m_fConst );
}
}
//////////////////////////////////////////////////////////////////////////
class Expr_Contains_c : public ISphExpr
{
protected:
CSphRefcountedPtr<ISphExpr> m_pLat;
CSphRefcountedPtr<ISphExpr> m_pLon;
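// classic even-odd (ray crossing) point-in-polygon test over n/2 vertices stored as
// interleaved x,y pairs; points lying exactly on an edge are reported as inside
// via the collinearity branch below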
static bool Contains ( float x, float y, int n, const float * p )
{
bool bIn = false;
for ( int ii=0; ii<n; ii+=2 )
{
// get that edge
float ax = p[ii];
float ay = p[ii+1];
float bx = ( ii==n-2 ) ? p[0] : p[ii+2];
float by = ( ii==n-2 ) ? p[1] : p[ii+3];
// check point vs edge
float t1 = (x-ax)*(by-ay);
float t2 = (y-ay)*(bx-ax);
if ( t1==t2 && !( ax==bx && ay==by ) )
{
// so AP and AB are collinear,
// because (AP dot (-AB.y, AB.x)) aka (t1-t2) is 0;
// then check (AP dot AB) vs (AB dot AB)
float t3 = (x-ax)*(bx-ax) + (y-ay)*(by-ay); // AP dot AB
float t4 = (bx-ax)*(bx-ax) + (by-ay)*(by-ay); // AB dot AB
if ( t3>=0 && t3<=t4 )
return true;
}
// count edge crossings
if ( ( ay>y )!=(by>y) )
if ( ( t1<t2 ) ^ ( by<ay ) )
bIn = !bIn;
}
return bIn;
}
public:
Expr_Contains_c ( ISphExpr * pLat, ISphExpr * pLon )
: m_pLat ( pLat )
, m_pLon ( pLon )
{
SafeAddRef ( pLat );
SafeAddRef ( pLon );
}
Expr_Contains_c() = default;
float Eval ( const CSphMatch & tMatch ) const override
{
return (float)IntEval ( tMatch );
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const override
{
return IntEval ( tMatch );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override
{
m_pLat->FixupLocator ( pOldSchema, pNewSchema );
m_pLon->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) override
{
m_pLat->Command ( eCmd, pArg );
m_pLon->Command ( eCmd, pArg );
}
protected:
uint64_t CalcHash ( const char * szTag, const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK(szTag);
CALC_CHILD_HASH(m_pLat);
CALC_CHILD_HASH(m_pLon);
return CALC_DEP_HASHES();
}
// FIXME! implement SetBlobPool?
Expr_Contains_c ( const Expr_Contains_c& rhs )
: m_pLat ( SafeClone (rhs.m_pLat) )
, m_pLon ( SafeClone (rhs.m_pLon) )
{}
};
class Expr_ContainsConstvec_c : public Expr_Contains_c, public Poly2dBBox_t
{
using BASE = Expr_Contains_c;
public:
Expr_ContainsConstvec_c ( ISphExpr * pLat, ISphExpr * pLon, const CSphString & sAttrLat, const CSphString & sAttrLon, const CSphVector<int> & dNodes, const ExprNode_t * pNodes, bool bGeoTesselate )
: Expr_Contains_c ( pLat, pLon )
{
m_sAttrLat = sAttrLat;
m_sAttrLon = sAttrLon;
// copy polygon data
assert ( dNodes.GetLength()>=6 );
m_dPoly.Resize ( dNodes.GetLength() );
ARRAY_FOREACH ( i, dNodes )
m_dPoly[i] = FloatVal ( &pNodes[dNodes[i]] );
// handle (huge) geosphere polygons
if ( bGeoTesselate )
GeoTesselate ( m_dPoly );
m_iNumPoints = m_dPoly.GetLength()/2;
// compute bbox
m_fMinX = m_fMaxX = m_dPoly[0];
for ( int i=2; i<m_dPoly.GetLength(); i+=2 )
{
m_fMinX = Min ( m_fMinX, m_dPoly[i] );
m_fMaxX = Max ( m_fMaxX, m_dPoly[i] );
}
m_fMinY = m_fMaxY = m_dPoly[1];
for ( int i=3; i<m_dPoly.GetLength(); i+=2 )
{
m_fMinY = Min ( m_fMinY, m_dPoly[i] );
m_fMaxY = Max ( m_fMaxY, m_dPoly[i] );
}
}
int IntEval ( const CSphMatch & tMatch ) const final
{
// eval args, do bbox check
float fLat = m_pLat->Eval(tMatch);
if ( fLat<m_fMinX || fLat>m_fMaxX )
return 0;
float fLon = m_pLon->Eval(tMatch);
if ( fLon<m_fMinY || fLon>m_fMaxY )
return 0;
// do the polygon check
return Contains ( fLat, fLon, m_dPoly.GetLength(), m_dPoly.Begin() );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_ContainsConstvec_c");
CALC_POD_HASHES(m_dPoly);
return CALC_PARENT_HASH();
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
BASE::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_GET_POLY2D_BBOX )
{
auto pBBox = (std::pair<Poly2dBBox_t *, bool>*)pArg;
assert(pBBox);
pBBox->first = this;
pBBox->second = true;
}
}
ISphExpr * Clone() const final { return new Expr_ContainsConstvec_c ( *this ); }
protected:
CSphVector<float> m_dPoly;
private:
Expr_ContainsConstvec_c ( const Expr_ContainsConstvec_c& ) = default;
};
class Expr_ContainsExprvec_c : public Expr_Contains_c
{
protected:
mutable CSphVector<float> m_dPoly;
VecRefPtrs_t<ISphExpr*> m_dExpr;
public:
Expr_ContainsExprvec_c ( ISphExpr * pLat, ISphExpr * pLon, CSphVector<ISphExpr*> & dExprs )
: Expr_Contains_c ( pLat, pLon )
{
m_dExpr.SwapData ( dExprs );
m_dPoly.Resize ( m_dExpr.GetLength() );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
ARRAY_FOREACH ( i, m_dExpr )
m_dPoly[i] = m_dExpr[i]->Eval ( tMatch );
return Contains ( m_pLat->Eval(tMatch), m_pLon->Eval(tMatch), m_dPoly.GetLength(), m_dPoly.Begin() );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
Expr_Contains_c::FixupLocator ( pOldSchema, pNewSchema );
for ( auto i : m_dExpr )
i->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_Contains_c::Command ( eCmd, pArg );
ARRAY_FOREACH ( i, m_dExpr )
m_dExpr[i]->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_ContainsExprvec_c");
CALC_CHILD_HASHES(m_dExpr);
return CALC_PARENT_HASH();
}
ISphExpr * Clone () const final
{
return new Expr_ContainsExprvec_c ( *this );
}
private:
Expr_ContainsExprvec_c ( const Expr_ContainsExprvec_c& rhs )
: Expr_Contains_c ( rhs )
, m_dPoly ( rhs.m_dPoly )
{
m_dExpr.Resize ( rhs.m_dExpr.GetLength () );
ARRAY_FOREACH ( i, m_dExpr )
m_dExpr[i] = SafeClone (rhs.m_dExpr[i]);
}
};
class Expr_ContainsStrattr_c : public Expr_Contains_c
{
protected:
CSphRefcountedPtr<ISphExpr> m_pStr;
bool m_bGeo;
public:
Expr_ContainsStrattr_c ( ISphExpr * pLat, ISphExpr * pLon, ISphExpr * pStr, bool bGeo )
: Expr_Contains_c ( pLat, pLon )
, m_pStr ( pStr )
, m_bGeo ( bGeo )
{
SafeAddRef ( pStr );
}
static void ParsePoly ( const char * p, int iLen, CSphVector<float> & dPoly )
{
const char * pBegin = p;
const char * pMax = sphFindLastNumeric ( p, iLen );
while ( p<pMax )
{
if ( isdigit(p[0]) || ( p+1<pMax && p[0]=='-' && isdigit(p[1]) ) )
{
char * pValue = const_cast<char*>(p);
dPoly.Add ( (float)strtod ( p, &pValue ) );
p = pValue;
}
else
p++;
}
// edge case - last numeric touches the end
iLen -= pMax - pBegin;
if ( iLen )
dPoly.Add ( (float)strtod ( CSphString(pMax, iLen).cstr (), nullptr ) );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
const char * pStr;
assert ( !m_pStr->IsDataPtrAttr() ); // beware of mem leaks caused by some StringEval implementations
int iLen = m_pStr->StringEval ( tMatch, (const BYTE **)&pStr );
CSphVector<float> dPoly;
ParsePoly ( pStr, iLen, dPoly );
if ( dPoly.GetLength()<6 )
return 0;
// OPTIMIZE? add quick bbox check too?
if ( m_bGeo )
GeoTesselate ( dPoly );
return Contains ( m_pLat->Eval(tMatch), m_pLon->Eval(tMatch), dPoly.GetLength(), dPoly.Begin() );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
Expr_Contains_c::FixupLocator ( pOldSchema, pNewSchema );
m_pStr->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_Contains_c::Command ( eCmd, pArg );
m_pStr->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_ContainsStrattr_c");
CALC_CHILD_HASH(m_pStr);
return CALC_PARENT_HASH();
}
ISphExpr * Clone () const final
{
return new Expr_ContainsStrattr_c ( *this );
}
private:
Expr_ContainsStrattr_c ( const Expr_ContainsStrattr_c& rhs )
: Expr_Contains_c ( rhs )
, m_pStr ( SafeClone (rhs.m_pStr) )
, m_bGeo ( rhs.m_bGeo )
{}
};
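// CONTAINS(POLY2D(...), lat, lon) evaluator factory; picks one of three strategies:
// a string/JSON attribute polygon parsed per match, a fully constant polygon with a
// precomputed bounding box, or generic per-match expression evaluation of the vertices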
ISphExpr * ExprParser_t::CreateContainsNode ( const ExprNode_t & tNode )
{
// get and check the args
const ExprNode_t & tArglist = m_dNodes [ tNode.m_iLeft ];
const int iPoly = m_dNodes [ tArglist.m_iLeft ].m_iLeft;
const int iLat = m_dNodes [ tArglist.m_iLeft ].m_iRight;
const int iLon = tArglist.m_iRight;
assert ( IsNumeric ( m_dNodes[iLat].m_eRetType ) || IsJson ( m_dNodes[iLat].m_eRetType ) );
assert ( IsNumeric ( m_dNodes[iLon].m_eRetType ) || IsJson ( m_dNodes[iLon].m_eRetType ) );
assert ( m_dNodes[iPoly].m_eRetType==SPH_ATTR_POLY2D );
// create evaluator
// gotta handle an optimized constant poly case
CSphVector<int> dPolyArgs = GatherArgNodes ( m_dNodes[iPoly].m_iLeft );
CSphRefcountedPtr<ISphExpr> pLat { ConvertExprJson ( CreateTree ( iLat ) ) };
CSphRefcountedPtr<ISphExpr> pLon { ConvertExprJson ( CreateTree ( iLon ) ) };
bool bGeoTesselate = ( m_dNodes[iPoly].m_iToken==TOK_FUNC && m_dNodes[iPoly].m_iFunc==FUNC_GEOPOLY2D );
if ( dPolyArgs.GetLength()==1 && ( m_dNodes[dPolyArgs[0]].m_iToken==TOK_ATTR_STRING || m_dNodes[dPolyArgs[0]].m_iToken==TOK_COLUMNAR_STRING || m_dNodes[dPolyArgs[0]].m_iToken==TOK_ATTR_JSON ) )
{
CSphRefcountedPtr<ISphExpr> dPolyArgs0 { ConvertExprJson ( CreateTree ( dPolyArgs[0] ) ) };
return new Expr_ContainsStrattr_c ( pLat, pLon, dPolyArgs0, bGeoTesselate );
}
if ( dPolyArgs.all_of ( [&] ( int iArg ) { return IsConst ( &m_dNodes[iArg] ); } ) )
return new Expr_ContainsConstvec_c ( pLat, pLon, GetNameByLocator(iLat), GetNameByLocator(iLon), dPolyArgs, m_dNodes.Begin(), bGeoTesselate ); // POLY2D(numeric-consts)
else
{
// POLY2D(generic-exprs)
VecRefPtrs_t<ISphExpr*> dExprs;
dExprs.Resize ( dPolyArgs.GetLength() );
ARRAY_FOREACH ( i, dExprs )
dExprs[i] = CreateTree ( dPolyArgs[i] );
ConvertArgsJson ( dExprs );
// will adopt dExprs and utilize them on d-tr
return new Expr_ContainsExprvec_c ( pLat, pLon, dExprs );
}
}
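// REMAP() evaluator: binary-searches the sorted, de-duplicated (cond,val) pairs by the
// condition expression's value and falls back to the value expression when no pair matches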
class Expr_Remap_c : public ISphExpr
{
struct CondValPair_t
{
int64_t m_iCond;
union
{
int64_t m_iVal;
float m_fVal;
};
explicit CondValPair_t ( int64_t iCond=0 ) : m_iCond ( iCond ), m_iVal ( 0 ) {}
bool operator< ( const CondValPair_t & rhs ) const { return m_iCond<rhs.m_iCond; }
bool operator== ( const CondValPair_t & rhs ) const { return m_iCond==rhs.m_iCond; }
};
CSphRefcountedPtr<ISphExpr> m_pCond;
CSphRefcountedPtr<ISphExpr> m_pVal;
CSphVector<CondValPair_t> m_dPairs;
public:
Expr_Remap_c ( ISphExpr * pCondExpr, ISphExpr * pValExpr, const CSphVector<int64_t> & dConds, const ConstList_c & tVals )
: m_pCond ( pCondExpr )
, m_pVal ( pValExpr )
, m_dPairs ( dConds.GetLength() )
{
assert ( pCondExpr && pValExpr );
assert ( dConds.GetLength() );
assert ( dConds.GetLength()==tVals.m_dInts.GetLength() ||
dConds.GetLength()==tVals.m_dFloats.GetLength() );
SafeAddRef ( pCondExpr );
SafeAddRef ( pValExpr );
if ( tVals.m_dInts.GetLength() )
ARRAY_FOREACH ( i, m_dPairs )
{
m_dPairs[i].m_iCond = dConds[i];
m_dPairs[i].m_iVal = tVals.m_dInts[i];
}
else
ARRAY_FOREACH ( i, m_dPairs )
{
m_dPairs[i].m_iCond = dConds[i];
m_dPairs[i].m_fVal = tVals.m_dFloats[i];
}
m_dPairs.Uniq();
}
float Eval ( const CSphMatch & tMatch ) const final
{
const CondValPair_t * p = m_dPairs.BinarySearch ( CondValPair_t ( m_pCond->Int64Eval ( tMatch ) ) );
if ( p )
return p->m_fVal;
return m_pVal->Eval ( tMatch );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
return (int)Int64Eval ( tMatch );
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
const CondValPair_t * p = m_dPairs.BinarySearch ( CondValPair_t ( m_pCond->Int64Eval ( tMatch ) ) );
if ( p )
return p->m_iVal;
return m_pVal->Int64Eval ( tMatch );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
m_pCond->FixupLocator ( pOldSchema, pNewSchema );
m_pVal->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
m_pCond->Command ( eCmd, pArg );
m_pVal->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_Remap_c");
CALC_POD_HASHES(m_dPairs);
CALC_CHILD_HASH(m_pCond);
CALC_CHILD_HASH(m_pVal);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_Remap_c ( *this );
}
private:
Expr_Remap_c ( const Expr_Remap_c& rhs )
: m_pCond ( SafeClone (rhs.m_pCond) )
, m_pVal ( SafeClone (rhs.m_pVal) )
, m_dPairs ( rhs.m_dPairs )
{}
};
//////////////////////////////////////////////////////////////////////////
class Expr_GetQuery_c final : public Expr_StrNoLocator_c
{
public:
Expr_GetQuery_c() = default;
int StringEval ( const CSphMatch &, const BYTE ** ppStr ) const final
{
assert ( ppStr );
CSphString sVal = m_sQuery;
int iLen = sVal.Length();
*ppStr = (const BYTE *)sVal.Leak();
return iLen;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd==SPH_EXPR_SET_QUERY )
m_sQuery = (const char*)pArg;
}
bool IsDataPtrAttr() const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GetQuery_c");
CALC_STR_HASH(m_sQuery, m_sQuery.Length());
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GetQuery_c ( *this );
}
private:
Expr_GetQuery_c ( const Expr_GetQuery_c& rhs )
: m_sQuery ( rhs.m_sQuery )
{}
CSphString m_sQuery;
};
//////////////////////////////////////////////////////////////////////////
class Expr_LastInsertID_c : public Expr_StrNoLocator_c
{
public:
int StringEval ( const CSphMatch &, const BYTE ** ppStr ) const final
{
assert ( ppStr );
if ( m_sIds.IsEmpty() )
{
*ppStr = nullptr;
return 0;
}
CSphString sVal = m_sIds;
int iLen = sVal.Length();
*ppStr = (const BYTE *)sVal.Leak();
return iLen;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
if ( eCmd==SPH_EXPR_SET_EXTRA_DATA )
{
static_cast<ISphExtra*>(pArg)->ExtraData ( EXTRA_GET_LAST_INSERT_ID, (void **)&m_sIds );
}
}
bool IsDataPtrAttr() const final { return true; }
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & bDisable ) final
{
bDisable = true;
return 0;
}
ISphExpr * Clone () const final
{
return new Expr_LastInsertID_c;
}
private:
CSphString m_sIds;
};
class Expr_UuidShort_c : public Expr_NoLocator_c
{
public:
Expr_UuidShort_c () = default;
float Eval ( const CSphMatch & ) const final { return (float) UidShort(); }
int IntEval ( const CSphMatch & ) const final { return (int)UidShort(); }
int64_t Int64Eval ( const CSphMatch & ) const final { return UidShort(); }
bool IsConst () const final { return true; }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_UuidShort_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_UuidShort_c ( *this );
}
private:
Expr_UuidShort_c ( const Expr_UuidShort_c & ) {}
};
//////////////////////////////////////////////////////////////////////////
ISphExpr * ConvertExprJson ( ISphExpr * pExpr )
{
if ( !pExpr )
return nullptr;
bool bConverted = false;
bool bJson = pExpr->IsJson ( bConverted );
if ( bJson && !bConverted )
{
ISphExpr * pConv = new Expr_JsonFieldConv_c ( pExpr );
pExpr->Release();
return pConv;
} else
{
return pExpr;
}
}
void ConvertArgsJson ( VecRefPtrs_t<ISphExpr*> & dArgs )
{
ARRAY_FOREACH ( i, dArgs )
{
dArgs[i] = ConvertExprJson ( dArgs[i] );
}
}
ISphExpr * ExprParser_t::CreateFieldNode ( int iField )
{
m_eEvalStage = SPH_EVAL_POSTLIMIT;
m_uStoredField = CSphColumnInfo::FIELD_STORED;
m_bNeedDocIds = true;
const CSphColumnInfo & tField = m_pSchema->GetField(iField);
if ( !(tField.m_uFieldFlags & CSphColumnInfo::FIELD_STORED) )
{
m_sCreateError.SetSprintf ( "field '%s' is not stored, see 'stored_fields' option", tField.m_sName.cstr() );
return nullptr;
}
return CreateExpr_GetStoredField ( tField.m_sName );
}
ISphExpr * ExprParser_t::CreateColumnarIntNode ( int iAttr, ESphAttr eAttrType )
{
const CSphColumnInfo & tAttr = m_pSchema->GetAttr(iAttr);
return CreateExpr_GetColumnarInt ( tAttr.m_sName, tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED );
}
ISphExpr * ExprParser_t::CreateColumnarFloatNode ( int iAttr )
{
const CSphColumnInfo & tAttr = m_pSchema->GetAttr(iAttr);
return CreateExpr_GetColumnarFloat ( tAttr.m_sName, tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED );
}
ISphExpr * ExprParser_t::CreateColumnarStringNode ( int iAttr )
{
const CSphColumnInfo & tAttr = m_pSchema->GetAttr(iAttr);
return CreateExpr_GetColumnarString ( tAttr.m_sName, tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED );
}
ISphExpr * ExprParser_t::CreateColumnarMvaNode ( int iAttr )
{
const CSphColumnInfo & tAttr = m_pSchema->GetAttr(iAttr);
return CreateExpr_GetColumnarMva ( tAttr.m_sName, tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED );
}
//////////////////////////////////////////////////////////////////////////
struct LevenshteinOptions_t
{
bool m_bNormalize = false;
int m_iLengthDelta = 0;
};
static LevenshteinOptions_t GetOptions ( const CSphNamedVariant * pValues, int iCount )
{
LevenshteinOptions_t tOpts;
for ( int i=0; i<iCount; i++, pValues++ )
{
if ( pValues->m_sKey=="normalize" )
tOpts.m_bNormalize = ( !!pValues->m_iValue );
else if ( pValues->m_sKey=="length_delta" )
tOpts.m_iLengthDelta = pValues->m_iValue;
}
return tOpts;
}
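// behavior sketch for the parsed options (illustrative call syntax):
//   LEVENSHTEIN(pattern, attr)                    -> plain edit distance (IntEval)
//   LEVENSHTEIN(pattern, attr, {normalize=1})     -> distance/max(len1,len2) in [0..1] (Eval)
//   LEVENSHTEIN(pattern, attr, {length_delta=N})  -> when abs(len1-len2)>=N, skip the computation
//                                                    and report the worst case, max(len1,len2)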
template<bool PATTERN_STRING>
class Expr_Levenshtein_c : public Expr_Binary_c
{
public:
Expr_Levenshtein_c ( ISphExpr * pPattern, ISphExpr * pAttr, const LevenshteinOptions_t tOpts )
: Expr_Binary_c ( "Expr_Levenshtein_c", pPattern, pAttr )
, m_tOpts ( tOpts )
{
if_const(PATTERN_STRING)
{
const BYTE * pBuf = nullptr;
CSphMatch tTmp;
m_iPatternLen = pPattern->StringEval ( tTmp, &pBuf );
m_sPattern.SetBinary ( (const char *)pBuf, m_iPatternLen );
FreeDataPtr ( *pPattern, pBuf );
}
}
int IntEval ( const CSphMatch & tMatch ) const final
{
assert ( !m_tOpts.m_bNormalize );
return GetDistance ( tMatch ).first;
}
float Eval ( const CSphMatch & tMatch ) const final
{
assert ( m_tOpts.m_bNormalize );
auto tDist = GetDistance ( tMatch );
float fDist = 1.0f;
if ( tDist.second )
fDist = (float)tDist.first / tDist.second;
return fDist;
}
ISphExpr * Clone () const final
{
return new Expr_Levenshtein_c ( *this );
}
private:
LevenshteinOptions_t m_tOpts;
CSphString m_sPattern;
int m_iPatternLen = 0;
mutable CSphVector<int> m_dTmp;
Expr_Levenshtein_c ( const Expr_Levenshtein_c& rhs )
: Expr_Binary_c ( rhs )
, m_tOpts ( rhs.m_tOpts )
, m_sPattern ( rhs.m_sPattern )
, m_iPatternLen ( rhs.m_iPatternLen )
{}
std::pair<int, int> GetDistance ( const CSphMatch & tMatch ) const
{
const BYTE * sPattern = (const BYTE *)m_sPattern.cstr();
int iPatternLen = m_iPatternLen;
if_const(!PATTERN_STRING)
iPatternLen = m_pFirst->StringEval ( tMatch, &sPattern );
const BYTE * pStr = nullptr;
int iLen = m_pSecond->StringEval ( tMatch, &pStr );
std::pair<int, int> tDist;
tDist.second = Max ( iPatternLen, iLen );
tDist.first = tDist.second;
if ( !m_tOpts.m_iLengthDelta || ( abs ( iPatternLen-iLen )<m_tOpts.m_iLengthDelta ) )
tDist.first = sphLevenshtein ( (const char *)sPattern, iPatternLen, (const char *)pStr, iLen, m_dTmp );
FreeDataPtr ( *m_pSecond, pStr );
if_const(!PATTERN_STRING)
FreeDataPtr ( *m_pFirst, sPattern );
return tDist;
}
};
ISphExpr * ExprParser_t::CreateLevenshteinNode ( ISphExpr * pPattern, ISphExpr * pAttr, ISphExpr * pOpts )
{
LevenshteinOptions_t tOpts;
if ( pOpts )
{
Expr_MapArg_c * pOptsArg = (Expr_MapArg_c *)pOpts;
tOpts = GetOptions ( pOptsArg->m_pValues, int ( pOptsArg->m_iCount ) );
}
if ( pPattern->IsConst() )
return new Expr_Levenshtein_c<true> ( pPattern, pAttr, tOpts );
else
return new Expr_Levenshtein_c<false> ( pPattern, pAttr, tOpts );
}
//////////////////////////////////////////////////////////////////////////
bool ExprParser_t::CheckStoredArg ( ISphExpr * pExpr )
{
if ( pExpr && pExpr->UsesDocstore() )
{
m_sCreateError.SetSprintf ( "stored fields can't be used in expressions" );
return false;
}
return true;
}
ISphExpr * ExprParser_t::CreateCmp ( const ExprNode_t & tNode, ISphExpr * pLeft, ISphExpr * pRight )
{
int iOp = tNode.m_iToken;
// str case
if ( ( m_dNodes[tNode.m_iLeft].m_eRetType==SPH_ATTR_STRING
|| m_dNodes[tNode.m_iLeft].m_eRetType==SPH_ATTR_STRINGPTR
|| m_dNodes[tNode.m_iLeft].m_eRetType==SPH_ATTR_JSON_FIELD
)
&& ( m_dNodes[tNode.m_iRight].m_eRetType==SPH_ATTR_STRING
|| m_dNodes[tNode.m_iRight].m_eRetType==SPH_ATTR_STRINGPTR )
)
{
if ( !CheckStoredArg(pLeft) || !CheckStoredArg(pRight) )
return nullptr;
switch ( iOp )
{
case '<':
return new Expr_StrCmp_c ( pLeft, pRight, m_eCollation, false, EStrCmpDir::LT );
case '>':
return new Expr_StrCmp_c ( pLeft, pRight, m_eCollation, false, EStrCmpDir::GT );
case TOK_LTE:
return new Expr_StrCmp_c ( pLeft, pRight, m_eCollation, true, EStrCmpDir::GT );
case TOK_GTE:
return new Expr_StrCmp_c ( pLeft, pRight, m_eCollation, true, EStrCmpDir::LT );
case TOK_EQ:
return new Expr_StrCmp_c ( pLeft, pRight, m_eCollation, false, EStrCmpDir::EQ );
case TOK_NE:
return new Expr_StrCmp_c ( pLeft, pRight, m_eCollation, true, EStrCmpDir::EQ );
default: assert ( 0 && "unhandled token type" ); break;
}
}
// numeric case
#define LOC_SPAWN_POLY(_classname) switch (tNode.m_eArgType) { \
case SPH_ATTR_INTEGER: return new _classname##Int_c ( pLeft, pRight ); \
case SPH_ATTR_BIGINT: return new _classname##Int64_c ( pLeft, pRight ); \
default: return new _classname##Float_c ( pLeft, pRight ); }
switch ( iOp )
{
case '<': LOC_SPAWN_POLY ( Expr_Lt );
case '>': LOC_SPAWN_POLY ( Expr_Gt );
case TOK_LTE: LOC_SPAWN_POLY ( Expr_Lte );
case TOK_GTE: LOC_SPAWN_POLY ( Expr_Gte );
case TOK_EQ: LOC_SPAWN_POLY ( Expr_Eq );
case TOK_NE: LOC_SPAWN_POLY ( Expr_Ne );
default: assert ( 0 && "unhandled token type" ); break;
}
#undef LOC_SPAWN_POLY
return nullptr;
}
bool ExprParser_t::PrepareFuncArgs ( const ExprNode_t & tNode, bool bSkipChildren, CSphRefcountedPtr<ISphExpr> & pLeft, CSphRefcountedPtr<ISphExpr> & pRight, VecRefPtrs_t<ISphExpr*> & dArgs )
{
// fold arglist to array
if ( !bSkipChildren )
{
SafeAddRef (pLeft);
MoveToArgList ( pLeft, dArgs );
if ( pRight )
{
pRight->AddRef ();
MoveToArgList ( pRight, dArgs );
}
}
for ( auto & i : dArgs )
if ( !CheckStoredArg(i) )
return false;
// spawn proper function
assert ( tNode.m_iFunc>=0 && tNode.m_iFunc<int(sizeof(g_dFuncs)/sizeof(g_dFuncs[0])) );
assert (
( bSkipChildren ) || // function will handle its arglist,
( g_dFuncs[tNode.m_iFunc].m_iArgs>=0 && g_dFuncs[tNode.m_iFunc].m_iArgs==dArgs.GetLength() ) || // arg count matches,
( g_dFuncs[tNode.m_iFunc].m_iArgs<0 && -g_dFuncs[tNode.m_iFunc].m_iArgs<=dArgs.GetLength() ) ); // or min vararg count reached
auto eFunc = (Tokh_e)tNode.m_iFunc;
switch ( eFunc )
{
case FUNC_TO_STRING:
case FUNC_INTERVAL:
case FUNC_IN:
case FUNC_LENGTH:
case FUNC_LEAST:
case FUNC_GREATEST:
case FUNC_ALL:
case FUNC_ANY:
case FUNC_INDEXOF:
break; // these have their own JSON converters
// all others will get JSON auto-converter
default:
ConvertArgsJson ( dArgs );
break;
}
return true;
}
ISphExpr * ExprParser_t::CreateFuncExpr ( int iNode, VecRefPtrs_t<ISphExpr*> & dArgs )
{
const ExprNode_t & tNode = m_dNodes[iNode];
auto eFunc = (Tokh_e)tNode.m_iFunc;
switch ( eFunc )
{
case FUNC_NOW: return CreateExprNow(m_iConstNow);
case FUNC_ABS: return new Expr_Abs_c ( dArgs[0] );
case FUNC_CEIL: return new Expr_Ceil_c ( dArgs[0] );
case FUNC_FLOOR: return new Expr_Floor_c ( dArgs[0] );
case FUNC_SIN: return new Expr_Sin_c ( dArgs[0] );
case FUNC_COS: return new Expr_Cos_c ( dArgs[0] );
case FUNC_LN: return new Expr_Ln_c ( dArgs[0] );
case FUNC_LOG2: return new Expr_Log2_c ( dArgs[0] );
case FUNC_LOG10: return new Expr_Log10_c ( dArgs[0] );
case FUNC_EXP: return new Expr_Exp_c ( dArgs[0] );
case FUNC_SQRT: return new Expr_Sqrt_c ( dArgs[0] );
case FUNC_SINT: return new Expr_Sint_c ( dArgs[0] );
case FUNC_CRC32: return new Expr_Crc32_c ( dArgs[0] );
case FUNC_FIBONACCI:return new Expr_Fibonacci_c ( dArgs[0] );
case FUNC_KNN_DIST: return new Expr_GetFloat_c ( m_pSchema->GetAttr ( GetKnnDistAttrName() )->m_tLocator, GetKnnDistAttrName() );
case FUNC_DAY: return CreateExprDay ( dArgs[0] );
case FUNC_WEEK: return CreateExprWeek ( dArgs[0], dArgs.GetLength()>1 ? dArgs[1] : nullptr );
case FUNC_MONTH: return CreateExprMonth ( dArgs[0] );
case FUNC_YEAR: return CreateExprYear ( dArgs[0] );
case FUNC_YEARMONTH: return CreateExprYearMonth ( dArgs[0] );
case FUNC_YEARMONTHDAY: return CreateExprYearMonthDay ( dArgs[0] );
case FUNC_YEARWEEK: return CreateExprYearWeek ( dArgs[0] );
case FUNC_HOUR: return CreateExprHour ( dArgs[0] );
case FUNC_MINUTE: return CreateExprMinute ( dArgs[0] );
case FUNC_SECOND: return CreateExprSecond ( dArgs[0] );
case FUNC_DAYOFWEEK: return CreateExprDayOfWeek ( dArgs[0] );
case FUNC_DAYOFYEAR: return CreateExprDayOfYear ( dArgs[0] );
case FUNC_QUARTER: return CreateExprQuarter ( dArgs[0] );
case FUNC_MIN: return new Expr_Min_c ( dArgs[0], dArgs[1] );
case FUNC_MAX: return new Expr_Max_c ( dArgs[0], dArgs[1] );
case FUNC_POW: return new Expr_Pow_c ( dArgs[0], dArgs[1] );
case FUNC_IDIV: return new Expr_Idiv_c ( dArgs[0], dArgs[1] );
case FUNC_IF: return new Expr_If_c ( dArgs[0], dArgs[1], dArgs[2] );
case FUNC_MADD: return new Expr_Madd_c ( dArgs[0], dArgs[1], dArgs[2] );
case FUNC_MUL3: return new Expr_Mul3_c ( dArgs[0], dArgs[1], dArgs[2] );
case FUNC_ATAN2: return new Expr_Atan2_c ( dArgs[0], dArgs[1] );
case FUNC_RAND: return new Expr_Rand_c ( dArgs.GetLength() ? dArgs[0] : nullptr,
tNode.m_iLeft<0 ? false : IsConst ( &m_dNodes[tNode.m_iLeft] ));
case FUNC_INTERVAL: return CreateIntervalNode ( tNode.m_iLeft, dArgs );
case FUNC_IN: return CreateInNode ( iNode );
case FUNC_LENGTH: return CreateLengthNode ( tNode, dArgs[0] );
case FUNC_BITDOT: return CreateBitdotNode ( tNode.m_iLeft, dArgs );
case FUNC_REMAP:
{
CSphRefcountedPtr<ISphExpr> pCond ( CreateTree ( tNode.m_iLeft ) );
CSphRefcountedPtr<ISphExpr> pVal ( CreateTree ( tNode.m_iRight ) );
assert ( pCond && pVal );
// This is a hack. We know how the parser fills m_dNodes, and thus where to find the constlists.
const CSphVector<int64_t> & dConds = m_dNodes [ iNode-2 ].m_pConsts->m_dInts;
const ConstList_c & tVals = *m_dNodes [ iNode-1 ].m_pConsts;
return new Expr_Remap_c ( pCond, pVal, dConds, tVals );
}
case FUNC_GEODIST: return CreateGeodistNode ( tNode.m_iLeft );
case FUNC_EXIST: return CreateExistNode ( tNode );
case FUNC_CONTAINS: return CreateContainsNode ( tNode );
case FUNC_POLY2D:
case FUNC_GEOPOLY2D:break; // just make gcc happy
case FUNC_ZONESPANLIST:
m_bHasZonespanlist = true;
m_eEvalStage = SPH_EVAL_PRESORT;
return new Expr_GetZonespanlist_c ();
case FUNC_TO_STRING:
if ( !CheckStoredArg(dArgs[0]) )
return nullptr;
return new Expr_ToString_c ( dArgs[0], m_dNodes [ tNode.m_iLeft ].m_eRetType );
case FUNC_CONCAT:
return CreateConcatNode ( tNode.m_iLeft, dArgs );
case FUNC_RANKFACTORS:
m_eEvalStage = SPH_EVAL_PRESORT;
return new Expr_GetRankFactors_c();
case FUNC_FACTORS:
return CreatePFNode ( tNode.m_iLeft );
case FUNC_BM25F:
{
m_uPackedFactorFlags |= SPH_FACTOR_ENABLE;
CSphVector<int> dBM25FArgs = GatherArgNodes ( tNode.m_iLeft );
const ExprNode_t & tLeft = m_dNodes [ dBM25FArgs[0] ];
const ExprNode_t & tRight = m_dNodes [ dBM25FArgs[1] ];
float fK1 = tLeft.m_fConst;
float fB = tRight.m_fConst;
fK1 = Max ( fK1, 0.001f );
fB = Min ( Max ( fB, 0.0f ), 1.0f );
CSphVector<CSphNamedVariant> * pFieldWeights = nullptr;
if ( dBM25FArgs.GetLength()>2 )
pFieldWeights = &m_dNodes [ dBM25FArgs[2] ].m_pMapArg->m_dPairs;
return new Expr_BM25F_c ( fK1, fB, pFieldWeights );
}
case FUNC_QUERY:
return new Expr_GetQuery_c;
case FUNC_BIGINT:
case FUNC_INTEGER:
case FUNC_DOUBLE:
case FUNC_UINT:
case FUNC_UINT64:
SafeAddRef ( dArgs[0] );
return dArgs[0];
case FUNC_LEAST: return CreateAggregateNode ( tNode, SPH_AGGR_MIN, dArgs[0] );
case FUNC_GREATEST: return CreateAggregateNode ( tNode, SPH_AGGR_MAX, dArgs[0] );
case FUNC_CURTIME: return CreateExprCurTime ( false, false );
case FUNC_CURDATE: return CreateExprCurDate();
case FUNC_TIME: return CreateExprTime ( dArgs[0] );
case FUNC_DATE: return CreateExprDate ( dArgs[0] );
case FUNC_DAYNAME: return CreateExprDayName ( dArgs[0] );
case FUNC_MONTHNAME: return CreateExprMonthName ( dArgs[0] );
case FUNC_UTC_TIME: return CreateExprCurTime ( true, false );
case FUNC_UTC_TIMESTAMP: return CreateExprCurTime ( true, true );
case FUNC_TIMEDIFF: return CreateExprTimeDiff ( dArgs[0], dArgs[1] );
case FUNC_DATEDIFF: return CreateExprDateDiff ( dArgs[0], dArgs[1] );
case FUNC_DATEADD: return CreateExprDateAdd ( iNode, true );
case FUNC_DATESUB: return CreateExprDateAdd ( iNode, false );
case FUNC_ALL:
case FUNC_ANY:
case FUNC_INDEXOF:
return CreateForInNode ( iNode );
case FUNC_MIN_TOP_WEIGHT:
m_eEvalStage = SPH_EVAL_PRESORT;
return new Expr_MinTopWeight_c();
case FUNC_MIN_TOP_SORTVAL:
m_eEvalStage = SPH_EVAL_PRESORT;
return new Expr_MinTopSortval_c();
case FUNC_REGEX:
return CreateRegexNode ( dArgs[0], dArgs[1] );
case FUNC_SUBSTRING_INDEX:
if ( !CheckStoredArg(dArgs[0]) || !CheckStoredArg(dArgs[1]) )
return nullptr;
return new Expr_SubstringIndex_c ( dArgs[0], dArgs[1], dArgs[2] );
case FUNC_UPPER:
{
if ( IsGlobalLocaleSet() && GlobalLocale()==std::locale::classic() )
return new ExprCaseTrival_c<true> ( dArgs[0] );
else
return new ExprCaseComplex_c<true> ( dArgs[0] );
}
case FUNC_LOWER:
{
if ( IsGlobalLocaleSet() && GlobalLocale()==std::locale::classic() )
return new ExprCaseTrival_c<false> ( dArgs[0] );
else
return new ExprCaseComplex_c<false> ( dArgs[0] );
}
case FUNC_LAST_INSERT_ID: return new Expr_LastInsertID_c();
case FUNC_CURRENT_USER:
case FUNC_USER:
{
auto sUser = CurrentUser();
return new Expr_GetStrConst_c ( sUser.first, sUser.second, false );
}
case FUNC_CONNECTION_ID: return new Expr_GetIntConst_c ( ConnID() );
case FUNC_LEVENSHTEIN: return CreateLevenshteinNode ( dArgs[0], dArgs[1], ( dArgs.GetLength()>2 ? dArgs[2] : nullptr ) );
case FUNC_DATE_FORMAT: return CreateExprDateFormat ( dArgs[0], dArgs[1] );
case FUNC_DATABASE: return new Expr_GetStrConst_c ( FROMS ( "Manticore" ), false ) ;
case FUNC_VERSION: return new Expr_GetStrConst_c ( FromStr ( sphinxexpr::MySQLVersion() ), false );
case FUNC_RANGE:
case FUNC_DATE_RANGE:
{
CSphRefcountedPtr<ISphExpr> pVal ( nullptr );
CSphVector<VecTraits_T < CSphNamedVariant > > dSrcRanges;
GatherArgFN ( tNode.m_iLeft, [&] ( int i )
{
if ( m_dNodes[i].m_eRetType==SPH_ATTR_MAPARG )
dSrcRanges.Add ( m_dNodes[i].m_pMapArg->m_dPairs );
else
pVal = CreateTree ( i );
});
AggrRangeSetting_t tRanges;
if ( !ParseAggrRange ( dSrcRanges, ( eFunc==FUNC_DATE_RANGE ), m_iConstNow, tRanges, m_sCreateError ) )
return nullptr;
return CreateExprRange ( pVal, tRanges );
}
case FUNC_HISTOGRAM:
case FUNC_DATE_HISTOGRAM:
{
CSphRefcountedPtr<ISphExpr> pVal;
VecTraits_T < CSphNamedVariant > dSrcOpt;
GatherArgFN ( tNode.m_iLeft, [&] ( int i )
{
if ( m_dNodes[i].m_eRetType==SPH_ATTR_MAPARG )
dSrcOpt = m_dNodes[i].m_pMapArg->m_dPairs;
else
pVal = CreateTree ( i );
});
if ( eFunc==FUNC_HISTOGRAM )
{
AggrHistSetting_t tHist;
if ( !ParseAggrHistogram ( dSrcOpt, tHist, m_sCreateError ) )
return nullptr;
return CreateExprHistogram ( pVal, tHist );
} else
{
AggrDateHistSetting_t tHist;
if ( !ParseAggrDateHistogram ( dSrcOpt, tHist, m_sCreateError ) )
return nullptr;
return CreateExprDateHistogram ( pVal, tHist );
}
}
case FUNC_UUID_SHORT: return new Expr_UuidShort_c();
default: // just make gcc happy
assert ( 0 && "unhandled function id" );
}
return nullptr;
}
/// fold nodes subtree into opcodes
ISphExpr * ExprParser_t::CreateTree ( int iNode )
{
if ( iNode<0 || GetCreateError() )
return nullptr;
const ExprNode_t & tNode = m_dNodes[iNode];
int iOp = tNode.m_iToken;
if ( iOp<=0 ) // tree doesn't need to be created (usually it was optimized away).
return nullptr;
// avoid spawning argument node in some cases
bool bSkipChildren = false;
if ( iOp==TOK_FUNC )
{
switch ( tNode.m_iFunc )
{
case FUNC_NOW:
case FUNC_IN:
case FUNC_EXIST:
case FUNC_GEODIST:
case FUNC_CONTAINS:
case FUNC_ZONESPANLIST:
case FUNC_RANKFACTORS:
case FUNC_FACTORS:
case FUNC_BM25F:
case FUNC_CURTIME:
case FUNC_CURDATE:
case FUNC_UTC_TIME:
case FUNC_UTC_TIMESTAMP:
case FUNC_ALL:
case FUNC_ANY:
case FUNC_INDEXOF:
case FUNC_MIN_TOP_WEIGHT:
case FUNC_MIN_TOP_SORTVAL:
case FUNC_REMAP:
case FUNC_LAST_INSERT_ID:
case FUNC_QUERY:
case FUNC_CURRENT_USER:
case FUNC_CONNECTION_ID:
case FUNC_DATABASE:
case FUNC_USER:
case FUNC_VERSION:
case FUNC_KNN_DIST:
case FUNC_RANGE:
case FUNC_HISTOGRAM:
case FUNC_DATE_RANGE:
case FUNC_DATE_HISTOGRAM:
case FUNC_UUID_SHORT:
bSkipChildren = true;
break;
default:
break;
}
}
CSphRefcountedPtr<ISphExpr> pLeft ( (tNode.m_iLeft<0 || bSkipChildren) ? nullptr : CreateTree ( tNode.m_iLeft ) );
CSphRefcountedPtr<ISphExpr> pRight ( (tNode.m_iRight<0 || bSkipChildren) ? nullptr : CreateTree ( tNode.m_iRight ) );
if ( GetCreateError() )
return nullptr;
#define LOC_SPAWN_POLY(_classname) switch (tNode.m_eArgType) { \
case SPH_ATTR_INTEGER: return new _classname##Int_c ( pLeft, pRight ); \
case SPH_ATTR_BIGINT: return new _classname##Int64_c ( pLeft, pRight ); \
default: return new _classname##Float_c ( pLeft, pRight ); }
switch (iOp)
{
case '+':
case '-':
case '*':
case '/':
case '&':
case '|':
case '%':
case '<':
case '>':
case TOK_LTE:
case TOK_GTE:
case TOK_EQ:
case TOK_NE:
case TOK_AND:
case TOK_OR:
case TOK_NOT:
if ( pLeft && m_dNodes[tNode.m_iLeft].m_eRetType==SPH_ATTR_JSON_FIELD && m_dNodes[tNode.m_iLeft].m_iToken==TOK_ATTR_JSON )
pLeft = new Expr_JsonFieldConv_c ( pLeft );
if ( pRight && m_dNodes[tNode.m_iRight].m_eRetType==SPH_ATTR_JSON_FIELD && m_dNodes[tNode.m_iRight].m_iToken==TOK_ATTR_JSON )
pRight = new Expr_JsonFieldConv_c ( pRight );
break;
default:
break;
}
switch (iOp)
{
case TOK_ATTR_INT: return new Expr_GetInt_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_ATTR_BITS: return new Expr_GetBits_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_ATTR_FLOAT: return new Expr_GetFloat_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_ATTR_SINT: return new Expr_GetSint_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_ATTR_STRING: return new Expr_GetString_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_ATTR_MVA64:
case TOK_ATTR_MVA32: return new Expr_GetMva_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_ATTR_FACTORS: return new Expr_GetFactorsAttr_c ( tNode.m_tLocator, GetNameByLocator(tNode) );
case TOK_COLUMNAR_INT: return CreateColumnarIntNode ( tNode.m_iLocator, SPH_ATTR_INTEGER );
case TOK_COLUMNAR_TIMESTAMP:return CreateColumnarIntNode ( tNode.m_iLocator, SPH_ATTR_TIMESTAMP );
case TOK_COLUMNAR_BIGINT: return CreateColumnarIntNode ( tNode.m_iLocator, SPH_ATTR_BIGINT );
case TOK_COLUMNAR_BOOL: return CreateColumnarIntNode ( tNode.m_iLocator, SPH_ATTR_BOOL );
case TOK_COLUMNAR_FLOAT: return CreateColumnarFloatNode ( tNode.m_iLocator );
case TOK_COLUMNAR_STRING: return CreateColumnarStringNode ( tNode.m_iLocator );
case TOK_COLUMNAR_UINT32SET:
case TOK_COLUMNAR_INT64SET:
case TOK_COLUMNAR_FLOATVEC: return CreateColumnarMvaNode ( tNode.m_iLocator );
case TOK_FIELD: return CreateFieldNode ( tNode.m_iLocator );
case TOK_CONST_FLOAT: return new Expr_GetConst_c ( tNode.m_fConst );
case TOK_CONST_INT:
switch (tNode.m_eRetType)
{
case SPH_ATTR_INTEGER: return new Expr_GetIntConst_c ( (int) tNode.m_iConst );
case SPH_ATTR_BIGINT: return new Expr_GetInt64Const_c ( tNode.m_iConst );
default: return new Expr_GetConst_c ( float ( tNode.m_iConst ) );
}
case TOK_CONST_STRING:
return new Expr_GetStrConst_c ( m_sExpr.first+GetConstStrOffset(tNode), GetConstStrLength(tNode), true );
case TOK_SUBKEY:
return new Expr_GetStrConst_c ( m_sExpr.first+GetConstStrOffset(tNode), GetConstStrLength(tNode), false );
case TOK_WEIGHT: return new Expr_GetWeight_c();
case '+': return new Expr_Add_c ( pLeft, pRight );
case '-': return new Expr_Sub_c ( pLeft, pRight );
case '*': return new Expr_Mul_c ( pLeft, pRight );
case '/': return new Expr_Div_c ( pLeft, pRight );
case '&': return new Expr_BitAnd_c ( pLeft, pRight );
case '|': return new Expr_BitOr_c ( pLeft, pRight );
case '%': return new Expr_Mod_c ( pLeft, pRight );
case '<':
case '>':
case TOK_LTE:
case TOK_GTE:
case TOK_EQ:
case TOK_NE:
return CreateCmp ( tNode, pLeft, pRight );
case TOK_AND: LOC_SPAWN_POLY ( Expr_And );
case TOK_OR: LOC_SPAWN_POLY ( Expr_Or );
case TOK_NOT:
return ( tNode.m_eArgType==SPH_ATTR_BIGINT )
? (ISphExpr * ) new Expr_NotInt64_c ( pLeft )
: (ISphExpr * ) new Expr_NotInt_c ( pLeft );
case ',':
if ( pLeft && pRight )
return new Expr_Arglist_c ( pLeft, pRight );
break;
case TOK_NEG: assert ( !pRight ); return new Expr_Neg_c ( pLeft );
case TOK_FUNC:
{
VecRefPtrs_t<ISphExpr*> dArgs;
if ( !PrepareFuncArgs ( tNode, bSkipChildren, pLeft, pRight, dArgs ) )
return nullptr;
return CreateFuncExpr ( iNode, dArgs );
}
case TOK_UDF: return CreateUdfNode ( tNode.m_iFunc, pLeft );
case TOK_HOOK_IDENT: return m_pHook->CreateNode ( tNode.m_iFunc, nullptr, nullptr, nullptr, nullptr, m_sCreateError );
case TOK_HOOK_FUNC: return m_pHook->CreateNode ( tNode.m_iFunc, pLeft, m_pSchema, &m_eEvalStage, &m_bNeedDocIds, m_sCreateError );
case TOK_MAP_ARG:
// tricky bit
// data gets moved (!) from node to ISphExpr at this point
return new Expr_MapArg_c ( tNode.m_pMapArg->m_dPairs );
case TOK_ATTR_JSON:
if ( pLeft && m_dNodes[tNode.m_iLeft].m_iToken==TOK_SUBKEY && !tNode.m_tLocator.m_bDynamic )
{
// json key is a single static subkey, switch to fastpath
return new Expr_JsonFastKey_c ( tNode.m_tLocator, GetNameByLocator(tNode), pLeft );
} else
{
// json key is a generic expression, use generic catch-all JsonField
VecRefPtrs_t<ISphExpr*> dArgs;
CSphVector<ESphAttr> dTypes;
if ( pLeft ) // may be NULL (top level array)
{
MoveToArgList ( pLeft.Leak (), dArgs );
GatherArgRetTypes ( tNode.m_iLeft, dTypes );
}
return new Expr_JsonField_c ( tNode.m_tLocator, GetNameByLocator(tNode), dArgs, dTypes );
}
case TOK_ITERATOR:
{
// iterator, e.g. handles "x.gid" in SELECT ALL(x.gid=1 FOR x IN json.array)
VecRefPtrs_t<ISphExpr*> dArgs;
CSphVector<ESphAttr> dTypes;
if ( pLeft )
{
MoveToArgList ( pLeft.Leak (), dArgs );
GatherArgRetTypes ( tNode.m_iLeft, dTypes );
}
CSphRefcountedPtr<ISphExpr> pIterator { new Expr_Iterator_c ( tNode.m_tLocator, GetNameByLocator(tNode), dArgs, dTypes, tNode.m_pAttr ) };
return new Expr_JsonFieldConv_c ( pIterator );
}
case TOK_IDENT: m_sCreateError.SetSprintf ( "unknown column: %s", tNode.m_sIdent ); break;
case TOK_IS_NULL:
case TOK_IS_NOT_NULL:
if ( m_dNodes[tNode.m_iLeft].m_eRetType==SPH_ATTR_JSON_FIELD )
return new Expr_JsonFieldIsNull_c ( pLeft, tNode.m_iToken==TOK_IS_NULL );
else
return new Expr_GetIntConst_c ( tNode.m_iToken!=TOK_IS_NULL );
default: assert ( 0 && "unhandled token type" ); break;
}
#undef LOC_SPAWN_POLY
// fire exit
return nullptr;
}
//////////////////////////////////////////////////////////////////////////
/// INTERVAL() evaluator for constant turn point values case
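/// e.g. INTERVAL(x, 10, 20) yields 0 when x<10, 1 when 10<=x<20, and 2 otherwise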
template < typename T >
class Expr_IntervalConst_c : public Expr_ArgVsConstSet_T<T>
{
public:
/// pre-evaluate and dismiss turn points
explicit Expr_IntervalConst_c ( CSphVector<ISphExpr *> & dArgs )
: Expr_ArgVsConstSet_T<T> ( dArgs[0], dArgs, 1 )
{}
/// evaluate arg, return interval id
int IntEval ( const CSphMatch & tMatch ) const final
{
T val = this->ExprEval ( this->m_pArg, tMatch ); // 'this' fixes gcc braindamage
ARRAY_FOREACH ( i, this->m_dValues ) // FIXME! OPTIMIZE! perform binary search here
if ( val<this->m_dValues[i] )
return i;
return this->m_dValues.GetLength();
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_IntervalConst_c");
return Expr_ArgVsConstSet_T<T>::CalcHash ( szClassName, tSorterSchema, uHash, bDisable ); // can't do CALC_PARENT_HASH because of gcc and templates
}
ISphExpr* Clone () const final
{
return new Expr_IntervalConst_c ( *this );
}
private:
Expr_IntervalConst_c ( const Expr_IntervalConst_c& ) = default;
};
/// generic INTERVAL() evaluator
template < typename T >
class Expr_Interval_c : public Expr_ArgVsSet_T<T>
{
protected:
VecRefPtrs_t<ISphExpr*> m_dTurnPoints;
public:
explicit Expr_Interval_c ( const CSphVector<ISphExpr *> & dArgs )
: Expr_ArgVsSet_T<T> ( dArgs[0] )
{
for ( int i=1; i<dArgs.GetLength(); ++i )
{
SafeAddRef ( dArgs[i] );
m_dTurnPoints.Add ( dArgs[i] );
}
}
/// evaluate arg, return interval id
int IntEval ( const CSphMatch & tMatch ) const final
{
T val = this->ExprEval ( this->m_pArg, tMatch ); // 'this' fixes gcc braindamage
ARRAY_FOREACH ( i, m_dTurnPoints )
if ( val < Expr_ArgVsSet_T<T>::ExprEval ( m_dTurnPoints[i], tMatch ) )
return i;
return m_dTurnPoints.GetLength();
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsSet_T<T>::Command ( eCmd, pArg );
ARRAY_FOREACH ( i, m_dTurnPoints )
m_dTurnPoints[i]->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_Interval_c");
CALC_CHILD_HASHES(m_dTurnPoints);
return Expr_ArgVsSet_T<T>::CalcHash ( szClassName, tSorterSchema, uHash, bDisable ); // can't do CALC_PARENT_HASH because of gcc and templates
}
ISphExpr * Clone () const final
{
return new Expr_Interval_c ( *this );
}
private:
Expr_Interval_c ( const Expr_Interval_c& rhs )
: Expr_ArgVsSet_T<T> ( rhs )
{
m_dTurnPoints.Resize ( rhs.m_dTurnPoints.GetLength() );
ARRAY_FOREACH ( i, m_dTurnPoints )
m_dTurnPoints[i] = rhs.m_dTurnPoints[i]->Clone();
}
};
//////////////////////////////////////////////////////////////////////////
/// IN() evaluator, arbitrary scalar expression vs. constant values
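/// e.g. IN(x, 1, 3, 5) yields 1 when x equals any of the listed constants, and 0 otherwise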
template < typename T, bool BINARY >
class Expr_In_c : public Expr_ArgVsConstSet_T<T>
{
public:
/// pre-sort values for binary search
Expr_In_c ( ISphExpr * pArg, ConstList_c * pConsts ) :
Expr_ArgVsConstSet_T<T> ( pArg, pConsts, false )
{
this->m_dValues.Uniq();
}
/// evaluate arg, check if the value is within set
int IntEval ( const CSphMatch & tMatch ) const final
{
T val = this->ExprEval ( this->m_pArg, tMatch ); // 'this' fixes gcc braindamage
if_const ( BINARY )
return this->m_dValues.BinarySearch ( val )!=nullptr;
else
{
for ( auto i : this->m_dValues )
if ( i==val )
return 1;
return 0;
}
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_In_c");
return Expr_ArgVsConstSet_T<T>::CalcHash ( szClassName, tSorterSchema, uHash, bDisable ); // can't do CALC_PARENT_HASH because of gcc and templates
}
ISphExpr * Clone () const final
{
return new Expr_In_c ( *this );
}
private:
Expr_In_c ( const Expr_In_c& ) = default;
};
/// IN() evaluator, arbitrary scalar expression vs. uservar
/// (for the sake of the evaluator, a uservar is a pre-sorted, refcounted external vector)
class Expr_InUservar_c : public Expr_ArgVsConstSet_T<int64_t>
{
public:
/// just get hold of args
explicit Expr_InUservar_c ( ISphExpr * pArg, const UservarIntSet_c& pConsts )
: Expr_ArgVsConstSet_T<int64_t> ( pArg, pConsts )
{
this->m_dValues.Sort ();
}
/// evaluate arg, check if the value is within set
int IntEval ( const CSphMatch & tMatch ) const final
{
int64_t iVal = ExprEval ( this->m_pArg, tMatch ); // 'this' fixes gcc braindamage
return this->m_dValues.BinarySearch ( iVal )!=nullptr;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_InUservar_c");
return Expr_ArgVsConstSet_T<int64_t>::CalcHash ( szClassName, tSorterSchema, uHash, bDisable );
}
ISphExpr * Clone () const final
{
return new Expr_InUservar_c ( *this );
}
private:
Expr_InUservar_c ( const Expr_InUservar_c& ) = default;
};
/// IN() evaluator, MVA attribute vs. constant values
template < typename T >
class Expr_MVAIn_c : public Expr_ArgVsConstSet_T<int64_t>, public ExprLocatorTraits_t
{
public:
/// pre-sort values for binary search
Expr_MVAIn_c ( const CSphAttrLocator & tLoc, const CSphString & sAttr, ConstList_c * pConsts )
: Expr_ArgVsConstSet_T<int64_t> ( nullptr, pConsts, false )
, ExprLocatorTraits_t ( tLoc, sAttr )
{
assert ( pConsts );
this->m_dValues.Sort();
}
ByteBlob_t MvaEval ( const CSphMatch & ) const final { assert ( 0 && "not implemented" ); return {nullptr,0}; }
/// evaluate arg, check if any values are within set
int IntEval ( const CSphMatch & tMatch ) const final
{
auto dMva = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
return MvaEval_Any<T> ( dMva, m_dValues );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsConstSet_T<int64_t>::Command ( eCmd, pArg );
ExprLocatorTraits_t::HandleCommand ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE *)pArg;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_MVAIn_c");
return CALC_DEP_HASHES_EX(m_uValueHash);
}
ISphExpr * Clone () const final
{
return new Expr_MVAIn_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
private:
Expr_MVAIn_c ( const Expr_MVAIn_c & rhs )
: Expr_ArgVsConstSet_T<int64_t> ( rhs )
, ExprLocatorTraits_t ( rhs )
{}
};
/// IN() evaluator, MVA attribute vs. uservars
template < typename T >
class Expr_MVAInU_c : public Expr_ArgVsConstSet_T<int64_t>, public ExprLocatorTraits_t
{
public:
/// pre-sort values for binary search
Expr_MVAInU_c ( const CSphAttrLocator & tLoc, const CSphString & sAttr, const UservarIntSet_c & pUservar )
: Expr_ArgVsConstSet_T<int64_t> ( nullptr, pUservar )
, ExprLocatorTraits_t ( tLoc, sAttr )
{
assert ( pUservar );
this->m_dValues.Sort();
}
ByteBlob_t MvaEval ( const CSphMatch & ) const final { assert ( 0 && "not implemented" ); return { nullptr, 0}; }
/// evaluate arg, check if any values are within set
int IntEval ( const CSphMatch & tMatch ) const final
{
auto dMva = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
return MvaEval_Any<T> ( dMva, m_dValues );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsConstSet_T<int64_t>::Command ( eCmd, pArg );
ExprLocatorTraits_t::HandleCommand ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE *)pArg;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_MVAInU_c");
return CALC_DEP_HASHES_EX(m_uValueHash);
}
ISphExpr * Clone () const final
{
return new Expr_MVAInU_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
private:
Expr_MVAInU_c ( const Expr_MVAInU_c & rhs )
: Expr_ArgVsConstSet_T<int64_t> ( rhs )
, ExprLocatorTraits_t ( rhs )
{}
};
/// LENGTH() evaluator for MVAs
class Expr_MVALength_c : public Expr_WithLocator_c
{
public:
Expr_MVALength_c ( const CSphAttrLocator & tLoc, const CSphString & sAttr, bool b64 )
: Expr_WithLocator_c ( tLoc, sAttr )
, m_b64 ( b64 )
{}
int IntEval ( const CSphMatch & tMatch ) const final
{
auto dMva = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
return (int)( m_b64 ? dMva.second/sizeof(int64_t) : dMva.second/sizeof(DWORD) );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_WithLocator_c::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_MVALength_c");
CALC_POD_HASH(m_b64);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_MVALength_c ( *this );
}
protected:
bool m_b64;
const BYTE * m_pBlobPool { nullptr };
private:
Expr_MVALength_c ( const Expr_MVALength_c& rhs )
: Expr_WithLocator_c ( rhs )
, m_b64 ( rhs.m_b64 )
{}
};
/// aggregate functions evaluator for MVA attribute
template < typename T >
class Expr_MVAAggr_c : public Expr_WithLocator_c
{
public:
Expr_MVAAggr_c ( const CSphAttrLocator & tLoc, const CSphString & sAttr, ESphAggrFunc eFunc )
: Expr_WithLocator_c ( tLoc, sAttr )
, m_eFunc ( eFunc )
{}
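// MIN/MAX below assume MVA values are stored pre-sorted (ascending), reducing them to the first/last element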
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
auto dMva = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
if ( !dMva.second )
return 0;
int nValues = dMva.second / sizeof(T);
const T * L = (const T *)dMva.first;
const T * R = L+nValues-1;
switch ( m_eFunc )
{
case SPH_AGGR_MIN: return *L;
case SPH_AGGR_MAX: return *R;
default: return 0;
}
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_WithLocator_c::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE *)pArg;
}
float Eval ( const CSphMatch & tMatch ) const final { return (float)Int64Eval ( tMatch ); }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)Int64Eval ( tMatch ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_MVAAggr_c");
CALC_POD_HASH(m_eFunc);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_MVAAggr_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
ESphAggrFunc m_eFunc {SPH_AGGR_NONE};
private:
Expr_MVAAggr_c ( const Expr_MVAAggr_c& rhs )
: Expr_WithLocator_c ( rhs )
, m_eFunc ( rhs.m_eFunc )
{}
};
/// IN() evaluator, JSON array vs. constant values
class Expr_JsonFieldIn_c : public Expr_ArgVsConstSet_T<int64_t>
{
public:
Expr_JsonFieldIn_c ( ConstList_c * pConsts, ISphExpr * pArg, ESphCollation eCollation )
: Expr_ArgVsConstSet_T<int64_t> ( pArg, pConsts, true )
, m_fnHashCalc ( GetStringHashCalcFunc(eCollation) )
{
assert ( pConsts );
const char * szExpr = pConsts->m_sExpr.first;
int iExprLen = pConsts->m_sExpr.second;
if ( pConsts->m_bPackedStrings )
{
assert(m_fnHashCalc);
for ( int64_t iVal : m_dValues )
{
auto iOfs = GetConstStrOffset ( iVal );
auto iLen = GetConstStrLength ( iVal );
if ( iOfs>0 && iLen>0 && iOfs+iLen<=iExprLen )
{
auto tRes = SqlUnescapeN ( szExpr + iOfs, iLen );
int iResLen = tRes.second; // unescaped length; renamed to avoid shadowing the outer iLen
m_dHashes.Add ( iResLen ? m_fnHashCalc ( (const BYTE*)tRes.first.cstr(), iResLen, SPH_FNV64_SEED ) : 0 );
m_dStrings.Add ( tRes.first );
}
}
m_dHashes.Sort();
}
}
Expr_JsonFieldIn_c ( const UservarIntSet_c & pUserVar, ISphExpr * pArg, ESphCollation eCollation )
: Expr_ArgVsConstSet_T<int64_t> ( pArg, pUserVar )
, m_fnHashCalc ( GetStringHashCalcFunc(eCollation) )
{
assert ( pUserVar );
m_dHashes.Sort();
}
Expr_JsonFieldIn_c ( const VecTraits_T<CSphString> & dVals, ISphExpr * pArg, ESphCollation eCollation )
: Expr_ArgVsConstSet_T<int64_t> ( pArg )
, m_fnHashCalc ( GetStringHashCalcFunc(eCollation) )
{
m_dHashes.Resize ( dVals.GetLength() );
m_dStrings.Resize ( dVals.GetLength() );
m_uValueHash = SPH_FNV64_SEED;
assert(m_fnHashCalc);
ARRAY_FOREACH ( i, dVals )
{
const CSphString & sVal = dVals[i];
m_dStrings[i] = sVal;
int iLen = sVal.Length();
m_dHashes[i] = iLen ? m_fnHashCalc ( (const BYTE*)sVal.cstr(), iLen, SPH_FNV64_SEED ) : 0;
m_uValueHash = sphFNV64cont ( sVal.cstr(), m_uValueHash );
}
m_dHashes.Uniq();
}
Expr_JsonFieldIn_c ( const VecTraits_T<int64_t> & dVals, ISphExpr * pArg, ESphCollation eCollation )
: Expr_ArgVsConstSet_T<int64_t> ( pArg )
, m_fnHashCalc ( GetStringHashCalcFunc(eCollation) )
{
m_dValues.Resize ( dVals.GetLength() );
ARRAY_FOREACH ( i, dVals )
m_dValues[i] = dVals[i];
m_dValues.Uniq();
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsConstSet_T<int64_t>::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
}
/// evaluate arg, check if any values are within set
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
int64_t iVal = 0;
switch ( eJson )
{
case JSON_INT32_VECTOR: return ArrayEval<int> ( pVal );
case JSON_INT64_VECTOR: return ArrayEval<int64_t> ( pVal );
case JSON_STRING_VECTOR: return StringArrayEval ( pVal, false );
case JSON_DOUBLE_VECTOR: return ArrayFloatEval ( pVal );
case JSON_STRING: return StringArrayEval ( pVal, true );
case JSON_INT32:
case JSON_INT64:
iVal = ( eJson==JSON_INT32 ? sphJsonLoadInt ( &pVal ) : sphJsonLoadBigint ( &pVal ) );
if ( m_bFloat )
return FloatEval ( (float)iVal );
else
return ValueEval ( iVal );
case JSON_DOUBLE:
iVal = sphJsonLoadBigint ( &pVal );
if ( m_bFloat )
return FloatEval ( sphQW2D ( iVal ) );
else
return ValueEval ( iVal );
case JSON_TRUE: return ValueEval(1);
case JSON_FALSE: return ValueEval(0);
case JSON_MIXED_VECTOR:
{
const BYTE * p = pVal;
sphJsonUnpackInt ( &p ); // skip node length
int iLen = sphJsonUnpackInt ( &p );
for ( int i=0; i<iLen; i++ )
{
auto eType = (ESphJsonType)*p++;
pVal = p;
int iRes = 0;
switch (eType)
{
case JSON_STRING:
iRes = StringArrayEval ( pVal, true );
break;
case JSON_INT32:
case JSON_INT64:
iVal = ( eType==JSON_INT32 ? sphJsonLoadInt ( &pVal ) : sphJsonLoadBigint ( &pVal ) );
if ( m_bFloat )
iRes = FloatEval ( (float)iVal );
else
iRes = ValueEval ( iVal );
break;
case JSON_DOUBLE:
iVal = sphJsonLoadBigint ( &pVal );
if ( m_bFloat )
iRes = FloatEval ( sphQW2D ( iVal ) );
else
iRes = ValueEval ( iVal );
break;
default: break; // for weird subobjects, just let IN() return false
}
if ( iRes )
return 1;
sphJsonSkipNode ( eType, &p );
}
return 0;
}
default: return 0;
}
}
bool SetupAsFilter ( CSphFilterSettings & tFilter, const ISphSchema & tSchema, const SIContainer_c & tSI ) const override
{
bool bExclude = tFilter.m_bExclude;
if ( !CanAliasedExprSetupAsFilter ( tFilter, bExclude ) )
return false;
if ( !m_dValues.GetLength() )
return false;
std::pair<const ISphSchema*,CSphString> tSchemaWithName;
tSchemaWithName.first = &tSchema;
m_pArg->Command ( SPH_EXPR_FORMAT_AS_TEXT, (void*)&tSchemaWithName );
if ( tSchemaWithName.second.IsEmpty() )
return false;
if ( !tSI.IsEnabled ( tSchemaWithName.second ) )
return false;
tFilter.m_bExclude = bExclude;
if ( m_dStrings.IsEmpty() )
{
tFilter.m_dValues.Resize(0);
for ( auto i : m_dValues )
tFilter.m_dValues.Add(i);
}
else
{
tFilter.m_eType = m_dStrings.GetLength()==1 ? SPH_FILTER_STRING : SPH_FILTER_STRING_LIST;
tFilter.m_dStrings = m_dStrings;
}
tFilter.m_sAttrName = tSchemaWithName.second;
return true;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonFieldIn_c");
return CALC_PARENT_HASH_EX(m_uValueHash);
}
ISphExpr * Clone () const final
{
return new Expr_JsonFieldIn_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
CSphVector<int64_t> m_dHashes;
StrVec_t m_dStrings;
StrHashCalc_fn m_fnHashCalc = nullptr;
ESphJsonType GetKey ( const BYTE ** ppKey, const CSphMatch & tMatch ) const
{
return ::GetKey ( ppKey, tMatch, m_pBlobPool, m_pArg );
}
int ValueEval ( const int64_t iVal ) const
{
for ( int64_t iValue: m_dValues )
if ( iVal==iValue )
return 1;
return 0;
}
int FloatEval ( const double fVal ) const
{
assert ( m_bFloat );
for ( int64_t iFilterVal : m_dValues )
{
double fFilterVal = sphDW2F ( (DWORD)iFilterVal );
if ( fabs ( fVal - fFilterVal )<=1e-6 )
return 1;
}
return 0;
}
// cannot apply MvaEval() to unordered JSON arrays, so we use a linear search
template <typename T>
int ArrayEval ( const BYTE * pVal ) const
{
int iLen = sphJsonUnpackInt ( &pVal );
auto * pArray = (const T *)pVal;
const T * pArrayMax = pArray+iLen;
for ( int64_t dValue : m_dValues )
{
auto iVal = (T)dValue;
for ( const T * m = pArray; m<pArrayMax; ++m )
if ( iVal==*m )
return 1;
}
return 0;
}
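// match a single packed string (bValueEval=true) or a whole packed string vector
// against the pre-sorted hash set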
int StringArrayEval ( const BYTE * pVal, bool bValueEval ) const
{
if ( !bValueEval )
sphJsonUnpackInt ( &pVal );
int iCount = bValueEval ? 1 : sphJsonUnpackInt ( &pVal );
assert(m_fnHashCalc);
while ( iCount-- )
{
int iLen = sphJsonUnpackInt ( &pVal );
if ( m_dHashes.BinarySearch ( iLen ? m_fnHashCalc ( pVal, iLen, SPH_FNV64_SEED ) : 0 ) )
return 1;
pVal += iLen;
}
return 0;
}
int ArrayFloatEval ( const BYTE * pVal ) const
{
int iLen = sphJsonUnpackInt ( &pVal );
for ( int64_t iFilterVal : m_dValues )
{
double fFilterVal = ( m_bFloat ? sphDW2F ( (DWORD)iFilterVal ) : iFilterVal );
const BYTE * p = pVal;
for ( int i=0; i<iLen; i++ )
{
double fStored = sphQW2D ( sphJsonLoadBigint ( &p ) );
if ( fabs ( fStored - fFilterVal )<=1e-6 )
return 1;
}
}
return 0;
}
bool IsJson ( bool & bConverted ) const final
{
bConverted = true;
return true;
}
private:
Expr_JsonFieldIn_c ( const Expr_JsonFieldIn_c& rhs )
: Expr_ArgVsConstSet_T<int64_t> ( rhs )
, m_dHashes ( rhs.m_dHashes )
, m_dStrings ( rhs.m_dStrings )
, m_fnHashCalc ( rhs.m_fnHashCalc )
{}
};
ISphExpr * ExprJsonIn ( const VecTraits_T<CSphString> & dVals, ISphExpr * pArg, ESphCollation eCollation )
{
return new Expr_JsonFieldIn_c ( dVals, pArg, eCollation );
}
ISphExpr * ExprJsonIn ( const VecTraits_T<int64_t> & dVals, ISphExpr * pArg, ESphCollation eCollation )
{
return new Expr_JsonFieldIn_c ( dVals, pArg, eCollation );
}
////////////////////////////////////////////////////////////////////
/// JSON field vs constant range
class Expr_JsonFieldRange_c : public Expr_ArgVsSet_T<int64_t>
{
using BASE = Expr_ArgVsSet_T<int64_t>;
public:
Expr_JsonFieldRange_c ( const CommonFilterSettings_t & tFilter, ISphExpr * pArg )
: BASE ( pArg )
, m_tFilter ( tFilter )
{}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
BASE::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
}
int IntEval ( const CSphMatch & tMatch ) const final
{
const BYTE * pVal = nullptr;
ESphJsonType eJson = GetKey ( &pVal, tMatch );
switch ( eJson )
{
case JSON_INT32_VECTOR: return ArrayEval<int>(pVal);
case JSON_INT64_VECTOR: return ArrayEval<int64_t>(pVal);
case JSON_DOUBLE_VECTOR: return ArrayFloatEval(pVal);
case JSON_INT32:
case JSON_INT64: return ValueEval ( eJson==JSON_INT32 ? sphJsonLoadInt ( &pVal ) : sphJsonLoadBigint ( &pVal ) );
case JSON_DOUBLE: return ValueEval ( sphQW2D ( sphJsonLoadBigint ( &pVal ) ) );
default:
return 0;
}
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_JsonFieldRange_c");
CALC_POD_HASH ( m_tFilter.m_iMinValue );
CALC_POD_HASH ( m_tFilter.m_iMaxValue );
CALC_POD_HASH ( m_tFilter.m_bHasEqualMin );
CALC_POD_HASH ( m_tFilter.m_bHasEqualMax );
CALC_POD_HASH ( m_tFilter.m_bOpenLeft );
CALC_POD_HASH ( m_tFilter.m_bOpenRight );
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_JsonFieldRange_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
CommonFilterSettings_t m_tFilter;
ESphJsonType GetKey ( const BYTE ** ppKey, const CSphMatch & tMatch ) const
{
return ::GetKey ( ppKey, tMatch, m_pBlobPool, m_pArg );
}
template<typename T>
int ValueEval ( T tVal ) const
{
if ( m_tFilter.m_eType==SPH_FILTER_FLOATRANGE )
{
if ( EvalRange ( (float)tVal, m_tFilter ) )
return 1;
}
else
{
if ( EvalRange ( tVal, m_tFilter ) )
return 1;
}
return 0;
}
template <typename T>
int ArrayEval ( const BYTE * pVal ) const
{
int iLen = sphJsonUnpackInt ( &pVal );
auto * pArray = (const T *)pVal;
if ( m_tFilter.m_eType==SPH_FILTER_FLOATRANGE )
{
for ( const T * pValue = pArray; pValue < pArray+iLen; pValue++ )
if ( EvalRange ( (float)*pValue, m_tFilter ) )
return 1;
}
else
{
for ( const T * pValue = pArray; pValue < pArray+iLen; pValue++ )
if ( EvalRange ( *pValue, m_tFilter ) )
return 1;
}
return 0;
}
int ArrayFloatEval ( const BYTE * pVal ) const
{
int iLen = sphJsonUnpackInt ( &pVal );
const BYTE * p = pVal;
for ( int i=0; i<iLen; i++ )
if ( EvalRange ( sphQW2D ( sphJsonLoadBigint ( &p ) ), m_tFilter ) )
return 1;
return 0;
}
bool IsJson ( bool & bConverted ) const final
{
bConverted = true;
return true;
}
private:
Expr_JsonFieldRange_c ( const Expr_JsonFieldRange_c & rhs )
: Expr_ArgVsSet_T<int64_t> ( rhs )
, m_tFilter ( rhs.m_tFilter )
{}
};
ISphExpr * ExprJsonRange ( const CommonFilterSettings_t & tFilter, ISphExpr * pArg )
{
return new Expr_JsonFieldRange_c ( tFilter, pArg );
}
////////////////////////////////////////////////////////////////////
// fixme! Expr_ArgVsConstSet_T collects raw packed strings in this case.
// maybe we should store FNV hashes there instead, and use them to speed up the search?
class Expr_StrIn_c : public Expr_ArgVsConstSet_T<int64_t>, public ExprLocatorTraits_t
{
public:
Expr_StrIn_c ( const CSphAttrLocator & tLoc, const CSphString & sAttr, ConstList_c * pConsts, ESphCollation eCollation )
: Expr_ArgVsConstSet_T<int64_t> ( nullptr, pConsts, false )
, ExprLocatorTraits_t ( tLoc, sAttr )
{
assert ( pConsts );
m_fnStrCmp = GetStringCmpFunc ( eCollation );
const char * sExpr = pConsts->m_sExpr.first;
int iExprLen = pConsts->m_sExpr.second;
for ( int64_t iVal : m_dValues )
{
auto iOfs = GetConstStrOffset ( iVal );
auto iLen = GetConstStrLength ( iVal );
if ( iOfs>0 && iOfs+iLen<=iExprLen )
{
auto sRes = SqlUnescape ( sExpr + iOfs, iLen );
m_dStringValues.Add ( sRes );
}
}
}
int IntEval ( const CSphMatch & tMatch ) const final
{
auto tVal = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
for ( const auto & tString : m_dStringValues )
if ( m_fnStrCmp ( tVal, ByteBlob_t ( tString ), false )==0 )
return 1;
return 0;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsConstSet_T<int64_t>::Command ( eCmd, pArg );
ExprLocatorTraits_t::HandleCommand ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_StrIn_c");
CALC_POD_HASH(m_fnStrCmp);
return CALC_PARENT_HASH_EX(m_uValueHash);
}
ISphExpr* Clone() const final
{
return new Expr_StrIn_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
SphStringCmp_fn m_fnStrCmp {nullptr};
StrVec_t m_dStringValues;
private:
Expr_StrIn_c ( const Expr_StrIn_c& rhs )
: Expr_ArgVsConstSet_T<int64_t> (rhs)
, ExprLocatorTraits_t (rhs)
, m_fnStrCmp ( rhs.m_fnStrCmp )
, m_dStringValues ( rhs.m_dStringValues )
{}
};
// fixme! maybe it is better to keep a uservar ref instead of a deep copy in Expr_ArgVsConstSet_T<int64_t>?
class Expr_StrInU_c : public Expr_ArgVsConstSet_T<int64_t>, public ExprLocatorTraits_t
{
public:
Expr_StrInU_c ( const CSphAttrLocator & tLoc, const CSphString & sAttr, const UservarIntSet_c& pUservar, ESphCollation eCollation )
: Expr_ArgVsConstSet_T<int64_t> ( nullptr, pUservar )
, ExprLocatorTraits_t ( tLoc, sAttr )
, m_fnStrCmp ( GetStringCmpFunc ( eCollation ))
{
assert ( pUservar );
}
int IntEval ( const CSphMatch & ) const final
{
// NOTE: uservar sets carry integer values only, so a string IN() vs a uservar never matches
return 0;
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsConstSet_T<int64_t>::Command ( eCmd, pArg );
ExprLocatorTraits_t::HandleCommand ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
m_pBlobPool = (const BYTE*)pArg;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_StrInU_c");
CALC_POD_HASH(m_fnStrCmp);
return CALC_PARENT_HASH_EX(m_uValueHash);
}
ISphExpr * Clone () const final
{
return new Expr_StrInU_c ( *this );
}
protected:
const BYTE * m_pBlobPool {nullptr};
SphStringCmp_fn m_fnStrCmp {nullptr};
private:
Expr_StrInU_c ( const Expr_StrInU_c& rhs )
: Expr_ArgVsConstSet_T<int64_t> (rhs)
, ExprLocatorTraits_t (rhs)
, m_fnStrCmp ( rhs.m_fnStrCmp )
{}
};
//////////////////////////////////////////////////////////////////////////
/// generic BITDOT() evaluator
/// first argument is a bit mask and the rest are bit weights
/// function returns sum of bits multiplied by their weights
/// BITDOT(5, 11, 33, 55) => 1*11 + 0*33 + 1*55 = 66
/// BITDOT(4, 11, 33, 55) => 0*11 + 0*33 + 1*55 = 55
template < typename T >
class Expr_Bitdot_c : public Expr_ArgVsSet_T<T>
{
public:
/// take ownership of the arg and the bit weights
explicit Expr_Bitdot_c ( const CSphVector<ISphExpr *> & dArgs )
: Expr_ArgVsSet_T<T> ( dArgs[0] )
{
for ( int i = 1; i<dArgs.GetLength (); ++i )
{
SafeAddRef ( dArgs[i] );
m_dBitWeights.Add ( dArgs[i] );
}
}
float Eval ( const CSphMatch & tMatch ) const final
{
return (float) DoEval ( tMatch );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
return (int) DoEval ( tMatch );
}
int64_t Int64Eval ( const CSphMatch & tMatch ) const final
{
return (int64_t) DoEval ( tMatch );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
Expr_ArgVsSet_T<T>::Command ( eCmd, pArg );
ARRAY_FOREACH ( i, m_dBitWeights )
m_dBitWeights[i]->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_Bitdot_c");
CALC_CHILD_HASHES(m_dBitWeights);
return Expr_ArgVsSet_T<T>::CalcHash ( szClassName, tSorterSchema, uHash, bDisable ); // can't do CALC_PARENT_HASH because of gcc and templates
}
ISphExpr* Clone() const final
{
return new Expr_Bitdot_c ( *this );
}
protected:
VecRefPtrs_t<ISphExpr*> m_dBitWeights;
/// generic evaluate
T DoEval ( const CSphMatch & tMatch ) const
{
int64_t uArg = this->m_pArg->Int64Eval ( tMatch ); // 'this' fixes gcc braindamage
T tRes = 0;
int iBit = 0;
while ( uArg && iBit<m_dBitWeights.GetLength() )
{
if ( uArg & 1 )
tRes += Expr_ArgVsSet_T<T>::ExprEval ( m_dBitWeights[iBit], tMatch );
uArg >>= 1;
iBit++;
}
return tRes;
}
private:
Expr_Bitdot_c ( const Expr_Bitdot_c& rhs )
: Expr_ArgVsSet_T<T> ( rhs )
{
m_dBitWeights.Resize ( rhs.m_dBitWeights.GetLength () );
ARRAY_FOREACH ( i, m_dBitWeights )
m_dBitWeights[i] = SafeClone (rhs.m_dBitWeights[i]);
}
};
//////////////////////////////////////////////////////////////////////////
class GeodistTraits_c : public GeoDistSettings_t
{
public:
void HandleCommand ( ESphExprCommand eCmd, void * pArg )
{
switch ( eCmd )
{
case SPH_EXPR_GET_GEODIST_SETTINGS:
{
auto pSettings = (std::pair<GeoDistSettings_t *, bool>*)pArg;
assert ( pSettings );
pSettings->first = this;
pSettings->second = true;
}
break;
case SPH_EXPR_GET_DEPENDENT_COLS:
static_cast<StrVec_t*> ( pArg )->Add ( m_sAttrLat );
static_cast<StrVec_t*> ( pArg )->Add ( m_sAttrLon );
break;
default:
break;
}
}
};
//////////////////////////////////////////////////////////////////////////
/// geodist() - attr point, constant anchor
class Expr_GeodistAttrConst_c : public ISphExpr, public GeodistTraits_c
{
public:
Expr_GeodistAttrConst_c ( Geofunc_fn pFunc, float fOut, CSphAttrLocator tLat, CSphAttrLocator tLon, float fAnchorLat, float fAnchorLon, const CSphString & sAttrLat, const CSphString & sAttrLon )
: m_tLat ( tLat )
, m_tLon ( tLon )
{
m_pFunc = pFunc;
m_fScale = fOut;
m_fAnchorLat = fAnchorLat;
m_fAnchorLon = fAnchorLon;
m_sAttrLat = sAttrLat;
m_sAttrLon = sAttrLon;
}
float Eval ( const CSphMatch & tMatch ) const final
{
return m_fScale*m_pFunc ( tMatch.GetAttrFloat ( m_tLat ), tMatch.GetAttrFloat ( m_tLon ), m_fAnchorLat, m_fAnchorLon );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
sphFixupLocator ( m_tLat, pOldSchema, pNewSchema );
sphFixupLocator ( m_tLon, pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final { HandleCommand ( eCmd, pArg ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GeodistAttrConst_c");
CALC_POD_HASH(m_fAnchorLat);
CALC_POD_HASH(m_fAnchorLon);
CALC_POD_HASH(m_fScale);
CALC_POD_HASH(m_pFunc);
return CALC_DEP_HASHES();
}
ISphExpr * Clone() const final { return new Expr_GeodistAttrConst_c ( *this ); }
private:
CSphAttrLocator m_tLat;
CSphAttrLocator m_tLon;
Expr_GeodistAttrConst_c ( const Expr_GeodistAttrConst_c& rhs )
: m_tLat ( rhs.m_tLat )
, m_tLon ( rhs.m_tLon )
{
*((GeoDistSettings_t*)this) = rhs;
}
};
/// geodist() - expr point, constant anchor
class Expr_GeodistConst_c: public ISphExpr, public GeodistTraits_c
{
public:
Expr_GeodistConst_c ( Geofunc_fn pFunc, float fOut, ISphExpr * pLat, ISphExpr * pLon, float fAnchorLat, float fAnchorLon, const CSphString & sAttrLat, const CSphString & sAttrLon )
: m_pLat ( pLat )
, m_pLon ( pLon )
{
SafeAddRef ( pLat );
SafeAddRef ( pLon );
m_pFunc = pFunc;
m_fScale = fOut;
m_fAnchorLat = fAnchorLat;
m_fAnchorLon = fAnchorLon;
m_sAttrLat = sAttrLat;
m_sAttrLon = sAttrLon;
}
float Eval ( const CSphMatch & tMatch ) const final
{
return m_fScale*m_pFunc ( m_pLat->Eval(tMatch), m_pLon->Eval(tMatch), m_fAnchorLat, m_fAnchorLon );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
m_pLat->FixupLocator ( pOldSchema, pNewSchema );
m_pLon->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
m_pLat->Command ( eCmd, pArg );
m_pLon->Command ( eCmd, pArg );
HandleCommand ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_GeodistConst_c");
CALC_POD_HASH(m_fAnchorLat);
CALC_POD_HASH(m_fAnchorLon);
CALC_POD_HASH(m_fScale);
CALC_POD_HASH(m_pFunc);
CALC_CHILD_HASH(m_pLat);
CALC_CHILD_HASH(m_pLon);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_GeodistConst_c ( *this );
}
private:
CSphRefcountedPtr<ISphExpr> m_pLat;
CSphRefcountedPtr<ISphExpr> m_pLon;
Expr_GeodistConst_c ( const Expr_GeodistConst_c& rhs )
: m_pLat ( SafeClone (rhs.m_pLat) )
, m_pLon ( SafeClone (rhs.m_pLon) )
{
*((GeoDistSettings_t*)this) = rhs;
}
};
/// geodist() - expr point, expr anchor
class Expr_Geodist_c: public ISphExpr
{
public:
Expr_Geodist_c ( Geofunc_fn pFunc, float fOut, ISphExpr * pLat, ISphExpr * pLon, ISphExpr * pAnchorLat, ISphExpr * pAnchorLon )
: m_pFunc ( pFunc )
, m_fOut ( fOut )
, m_pLat ( pLat )
, m_pLon ( pLon )
, m_pAnchorLat ( pAnchorLat )
, m_pAnchorLon ( pAnchorLon )
{
SafeAddRef ( pLat );
SafeAddRef ( pLon );
SafeAddRef ( pAnchorLat );
SafeAddRef ( pAnchorLon );
}
float Eval ( const CSphMatch & tMatch ) const final
{
return m_fOut*m_pFunc ( m_pLat->Eval(tMatch), m_pLon->Eval(tMatch), m_pAnchorLat->Eval(tMatch), m_pAnchorLon->Eval(tMatch) );
}
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final
{
m_pLat->FixupLocator ( pOldSchema, pNewSchema );
m_pLon->FixupLocator ( pOldSchema, pNewSchema );
m_pAnchorLat->FixupLocator ( pOldSchema, pNewSchema );
m_pAnchorLon->FixupLocator ( pOldSchema, pNewSchema );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
m_pLat->Command ( eCmd, pArg );
m_pLon->Command ( eCmd, pArg );
m_pAnchorLat->Command ( eCmd, pArg );
m_pAnchorLon->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_Geodist_c");
CALC_POD_HASH(m_fOut);
CALC_POD_HASH(m_pFunc);
CALC_CHILD_HASH(m_pLat);
CALC_CHILD_HASH(m_pLon);
CALC_CHILD_HASH(m_pAnchorLat);
CALC_CHILD_HASH(m_pAnchorLon);
return CALC_DEP_HASHES();
}
ISphExpr * Clone () const final
{
return new Expr_Geodist_c ( *this );
}
private:
Geofunc_fn m_pFunc;
float m_fOut;
CSphRefcountedPtr<ISphExpr> m_pLat;
CSphRefcountedPtr<ISphExpr> m_pLon;
CSphRefcountedPtr<ISphExpr> m_pAnchorLat;
CSphRefcountedPtr<ISphExpr> m_pAnchorLon;
Expr_Geodist_c ( const Expr_Geodist_c& rhs )
: m_pFunc ( rhs.m_pFunc )
, m_fOut ( rhs.m_fOut )
, m_pLat ( SafeClone (rhs.m_pLat) )
, m_pLon ( SafeClone (rhs.m_pLon) )
, m_pAnchorLat ( SafeClone (rhs.m_pAnchorLat) )
, m_pAnchorLon ( SafeClone (rhs.m_pAnchorLon) )
{}
};
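/// REGEX() evaluator; returns 1 when the constant RE2 pattern partially matches the string argument,
/// 0 otherwise (and always 0 when built without RE2 support)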
class Expr_Regex_c final : public Expr_ArgVsSet_T<int>
{
public:
Expr_Regex_c ( ISphExpr * pAttr, ISphExpr * pString )
: Expr_ArgVsSet_T ( pAttr )
{
CSphMatch tTmp;
const BYTE * sVal = nullptr;
int iLen = pString->StringEval ( tTmp, &sVal );
m_sRegex = CSphString ( (const char*)sVal, iLen );
if ( iLen )
m_uFilterHash = sphFNV64 ( sVal, iLen );
SetupRE2();
}
#if WITH_RE2
RE2 * GetRE2() const { return m_pRE2; }
#endif
~Expr_Regex_c() final
{
#if WITH_RE2
SafeDelete ( m_pRE2 );
#endif
}
int IntEval ( const CSphMatch & tMatch ) const final
{
int iRes = 0;
#if WITH_RE2
if ( !m_pRE2 )
return 0;
const BYTE * sVal = nullptr;
int iLen = m_pArg->StringEval ( tMatch, &sVal );
re2::StringPiece tBuf ( (const char *)sVal, iLen );
iRes = !!( RE2::PartialMatchN ( tBuf, *m_pRE2, nullptr, 0 ) );
if ( m_pArg->IsDataPtrAttr () ) SafeDeleteArray ( sVal );
#endif
return iRes;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("Expr_Regex_c");
uHash ^= m_uFilterHash;
return CALC_DEP_HASHES();
}
ISphExpr* Clone() const final
{
return new Expr_Regex_c ( *this );
}
protected:
CSphString m_sRegex;
uint64_t m_uFilterHash = SPH_FNV64_SEED;
#if WITH_RE2
RE2 * m_pRE2 = nullptr;
#endif
void SetupRE2()
{
#if WITH_RE2
SafeDelete(m_pRE2);
re2::StringPiece tBuf ( m_sRegex.cstr(), m_sRegex.Length() );
RE2::Options tOpts;
tOpts.set_encoding ( RE2::Options::Encoding::EncodingUTF8 );
m_pRE2 = new RE2 ( tBuf, tOpts );
#endif
}
private:
Expr_Regex_c ( const Expr_Regex_c & rhs )
: Expr_ArgVsSet_T ( rhs )
, m_sRegex ( rhs.m_sRegex )
, m_uFilterHash ( rhs.m_uFilterHash )
{
SetupRE2();
}
};
//////////////////////////////////////////////////////////////////////////
struct DistanceUnit_t
{
CSphString m_dNames[3];
float m_fConversion;
};
bool sphGeoDistanceUnit ( const char * szUnit, float & fCoeff )
{
static DistanceUnit_t dUnits[] =
{
{ { "mi", "miles" }, 1609.34f },
{ { "yd", "yards" }, 0.9144f },
{ { "ft", "feet" }, 0.3048f },
{ { "in", "inch" }, 0.0254f },
{ { "km", "kilometers" }, 1000.0f },
{ { "m", "meters" }, 1.0f },
{ { "cm", "centimeters" }, 0.01f },
{ { "mm", "millimeters" }, 0.001f },
{ { "NM", "nmi", "nauticalmiles" }, 1852.0f }
};
if ( !szUnit || !*szUnit )
{
fCoeff = 1.0f;
return true;
}
for ( const auto & i : dUnits )
for ( const auto & j : i.m_dNames )
if ( j==szUnit )
{
fCoeff = i.m_fConversion;
return true;
}
return false;
}
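/// collect the raw token type (m_iToken) of every argument in the arglist subtree rooted at iNode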
CSphVector<int> ExprParser_t::GatherArgTypes ( int iNode )
{
CSphVector<int> dTypes;
GatherArgFN ( iNode, [this, &dTypes] ( int i ) { dTypes.Add ( m_dNodes[i].m_iToken ); } );
return dTypes;
}
CSphVector<int> ExprParser_t::GatherArgNodes ( int iNode )
{
CSphVector<int> dNodes;
GatherArgFN ( iNode, [&dNodes] ( int i ) { dNodes.Add ( i ); } );
return dNodes;
}
void ExprParser_t::GatherArgRetTypes ( int iNode, CSphVector<ESphAttr> & dTypes )
{
GatherArgFN ( iNode, [this, &dTypes] ( int i ) { dTypes.Add ( m_dNodes[i].m_eRetType ); } );
}
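/// walk a comma-separated arglist using an explicit stack (no recursion, so very long
/// arglists cannot overflow the C stack) and invoke the functor once per argument node,
/// left to right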
template < typename FN >
void ExprParser_t::GatherArgFN ( int iNode, FN && fnFunctor )
{
if ( iNode<0 )
return;
m_dGatherStack.Resize ( 0 );
StackNode_t & tInitial = m_dGatherStack.Add();
const ExprNode_t & tNode = m_dNodes[iNode];
tInitial.m_iNode = iNode;
tInitial.m_iLeft = tNode.m_iLeft;
tInitial.m_iRight = tNode.m_iRight;
while ( !m_dGatherStack.IsEmpty() )
{
StackNode_t & tCur = m_dGatherStack.Last();
if ( m_dNodes[tCur.m_iNode].m_iToken!=',' )
{
fnFunctor ( tCur.m_iNode );
m_dGatherStack.Pop();
continue;
}
if ( tCur.m_iLeft==-1 && tCur.m_iRight==-1 )
{
m_dGatherStack.Pop();
continue;
}
int iChild = -1;
if ( tCur.m_iLeft>=0 )
Swap ( iChild, tCur.m_iLeft );
else if ( tCur.m_iRight>=0 )
Swap ( iChild, tCur.m_iRight );
else
continue;
assert ( iChild>=0 );
const ExprNode_t & tChild = m_dNodes[iChild];
StackNode_t & tNext = m_dGatherStack.Add();
tNext.m_iNode = iChild;
tNext.m_iLeft = tChild.m_iLeft;
tNext.m_iRight = tChild.m_iRight;
}
}
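/// check that all arglist members (after skipping the first iSkip ones) are
/// plain integer/float constants or map arguments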
bool ExprParser_t::CheckForConstSet ( int iArgsNode, int iSkip )
{
CSphVector<int> dTypes = GatherArgTypes ( iArgsNode );
return dTypes.Slice ( iSkip ).all_of (
[] ( int t ) { return t==TOK_CONST_INT || t==TOK_CONST_FLOAT || t==TOK_MAP_ARG; } );
}
template < typename T >
void ExprParser_t::WalkTree ( int iRoot, T & FUNCTOR )
{
if ( iRoot>=0 )
{
const ExprNode_t & tNode = m_dNodes[iRoot];
FUNCTOR.Enter ( tNode, m_dNodes );
WalkTree ( tNode.m_iLeft, FUNCTOR );
WalkTree ( tNode.m_iRight, FUNCTOR );
FUNCTOR.Exit ( tNode );
}
}
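/// INTERVAL ( expr, point1 [, ..., pointN ] ); picks the constant-points
/// specialization when every point is a constant, and auto-converts a JSON
/// field first argument to a numeric one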
ISphExpr * ExprParser_t::CreateIntervalNode ( int iArgsNode, CSphVector<ISphExpr *> & dArgs )
{
assert ( dArgs.GetLength()>=2 );
CSphVector<ESphAttr> dTypes;
GatherArgRetTypes ( iArgsNode, dTypes );
// force type conversion, where possible
if ( dTypes[0]==SPH_ATTR_JSON_FIELD )
{
auto pConverted = new Expr_JsonFieldConv_c ( dArgs[0] );
SafeRelease ( dArgs[0] );
dArgs[0] = pConverted;
}
bool bConst = CheckForConstSet ( iArgsNode, 1 );
ESphAttr eAttrType = m_dNodes[iArgsNode].m_eArgType;
if ( bConst )
{
switch ( eAttrType )
{
case SPH_ATTR_INTEGER: return new Expr_IntervalConst_c<int> ( dArgs );
case SPH_ATTR_BIGINT: return new Expr_IntervalConst_c<int64_t> ( dArgs );
default: return new Expr_IntervalConst_c<float> ( dArgs );
}
} else
{
switch ( eAttrType )
{
case SPH_ATTR_INTEGER: return new Expr_Interval_c<int> ( dArgs );
case SPH_ATTR_BIGINT: return new Expr_Interval_c<int64_t> ( dArgs );
default: return new Expr_Interval_c<float> ( dArgs );
}
}
#if !_WIN32
return nullptr;
#endif
}
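/// IN ( arg, ... ) where the right side is either an inline constant list or a
/// user variable; dispatches to MVA/string/JSON/columnar specializations based on
/// the left argument, and to a separate lookup specialization for constant lists
/// longer than 128 elements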
ISphExpr * ExprParser_t::CreateInNode ( int iNode )
{
const ExprNode_t & tLeft = m_dNodes[m_dNodes[iNode].m_iLeft];
const ExprNode_t & tRight = m_dNodes[m_dNodes[iNode].m_iRight];
switch ( tRight.m_iToken )
{
// create IN(arg,constlist)
case TOK_CONST_LIST:
switch ( tLeft.m_iToken )
{
case TOK_ATTR_MVA32: return new Expr_MVAIn_c<DWORD> ( tLeft.m_tLocator, GetNameByLocator(tLeft), tRight.m_pConsts );
case TOK_ATTR_MVA64: return new Expr_MVAIn_c<int64_t> ( tLeft.m_tLocator, GetNameByLocator(tLeft), tRight.m_pConsts );
case TOK_ATTR_STRING: return new Expr_StrIn_c ( tLeft.m_tLocator, GetNameByLocator(tLeft), tRight.m_pConsts, m_eCollation );
case TOK_ATTR_JSON: return new Expr_JsonFieldIn_c ( tRight.m_pConsts, CSphRefcountedPtr<ISphExpr> { CreateTree ( m_dNodes [ iNode ].m_iLeft ) }, m_eCollation );
case TOK_COLUMNAR_UINT32SET:return CreateExpr_ColumnarMva32In ( GetNameByLocator(tLeft), tRight.m_pConsts );
case TOK_COLUMNAR_INT64SET: return CreateExpr_ColumnarMva64In ( GetNameByLocator(tLeft), tRight.m_pConsts );
case TOK_COLUMNAR_STRING: return CreateExpr_ColumnarStringIn ( GetNameByLocator(tLeft), tRight.m_pConsts, m_eCollation );
default:
{
CSphRefcountedPtr<ISphExpr> pArg ( CreateTree ( m_dNodes[iNode].m_iLeft ) );
int iConsts = tRight.m_pConsts->m_eRetType==SPH_ATTR_INTEGER ? tRight.m_pConsts->m_dInts.GetLength() : tRight.m_pConsts->m_dFloats.GetLength();
bool bBinary = iConsts>128;
switch ( WidestType ( tLeft.m_eRetType, tRight.m_pConsts->m_eRetType ) )
{
case SPH_ATTR_INTEGER:
if ( bBinary )
return new Expr_In_c<int,true> ( pArg, tRight.m_pConsts );
else
return new Expr_In_c<int,false> ( pArg, tRight.m_pConsts );
case SPH_ATTR_BIGINT:
if ( bBinary )
return new Expr_In_c<int64_t,true> ( pArg, tRight.m_pConsts );
else
return new Expr_In_c<int64_t,false> ( pArg, tRight.m_pConsts );
default:
if ( bBinary )
return new Expr_In_c<float,true> ( pArg, tRight.m_pConsts );
else
return new Expr_In_c<float,false> ( pArg, tRight.m_pConsts );
}
}
}
// create IN(arg,uservar)
case TOK_USERVAR:
{
if ( !UservarsAvailable() )
{
m_sCreateError.SetSprintf ( "internal error: no uservars hook" );
return nullptr;
}
UservarIntSet_c pUservar = Uservars ( m_dUservars[(int)tRight.m_iConst] );
if ( !pUservar )
{
m_sCreateError.SetSprintf ( "undefined user variable '%s'", m_dUservars[(int)tRight.m_iConst].cstr() );
return nullptr;
}
switch ( tLeft.m_iToken )
{
case TOK_ATTR_MVA32: return new Expr_MVAInU_c<DWORD> ( tLeft.m_tLocator, GetNameByLocator(tLeft), pUservar );
case TOK_ATTR_MVA64: return new Expr_MVAInU_c<int64_t> ( tLeft.m_tLocator, GetNameByLocator(tLeft), pUservar );
case TOK_ATTR_STRING: return new Expr_StrInU_c ( tLeft.m_tLocator, GetNameByLocator(tLeft), pUservar, m_eCollation );
case TOK_ATTR_JSON: return new Expr_JsonFieldIn_c ( pUservar, CSphRefcountedPtr<ISphExpr> { CreateTree ( m_dNodes[iNode].m_iLeft ) }, m_eCollation );
default: return new Expr_InUservar_c ( CSphRefcountedPtr<ISphExpr> { CreateTree ( m_dNodes[iNode].m_iLeft ) }, pUservar );
}
}
// oops, unhandled case
default:
m_sCreateError = "IN() arguments must be constants (except the 1st one)";
}
return nullptr;
}
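/// LENGTH() over a string (including string-returning functions), MVA (plain or
/// columnar) or JSON field argument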
ISphExpr * ExprParser_t::CreateLengthNode ( const ExprNode_t & tNode, ISphExpr * pLeft )
{
const ExprNode_t & tLeft = m_dNodes [ tNode.m_iLeft ];
switch ( tLeft.m_iToken )
{
case TOK_FUNC:
case TOK_ATTR_STRING: return new Expr_StrLength_c(pLeft);
case TOK_ATTR_MVA32:
case TOK_ATTR_MVA64: return new Expr_MVALength_c ( tLeft.m_tLocator, m_pSchema->GetAttr(tLeft.m_iLocator).m_sName, tLeft.m_iToken==TOK_ATTR_MVA64 );
case TOK_COLUMNAR_UINT32SET: return CreateExpr_ColumnarMva32Length ( m_pSchema->GetAttr(tLeft.m_iLocator).m_sName );
case TOK_COLUMNAR_INT64SET: return CreateExpr_ColumnarMva64Length ( m_pSchema->GetAttr(tLeft.m_iLocator).m_sName );
case TOK_COLUMNAR_STRING: return CreateExpr_ColumnarStringLength ( m_pSchema->GetAttr(tLeft.m_iLocator).m_sName );
case TOK_ATTR_JSON: return new Expr_JsonFieldLength_c ( pLeft );
default:
m_sCreateError = "LENGTH() argument must be MVA or JSON or STRING field";
return nullptr;
}
}
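/// GEODIST ( lat1, lon1, lat2, lon2 [, {in=..., out=..., method=...}] );
/// folds to a constant when both points are constant, and swaps the argument
/// pairs so that a constant anchor (if any) always comes second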
ISphExpr * ExprParser_t::CreateGeodistNode ( int iArgs )
{
CSphVector<int> dArgs = GatherArgNodes ( iArgs );
assert ( dArgs.GetLength()==4 || dArgs.GetLength()==5 );
float fOut = 1.0f; // result scale, defaults to out=meters
bool bDeg = false; // arg units, defaults to in=radians
GeoFunc_e eMethod = GEO_ADAPTIVE; // geodist function to use, defaults to adaptive
if ( dArgs.GetLength()==5 )
{
assert ( m_dNodes [ dArgs[4] ].m_eRetType==SPH_ATTR_MAPARG );
// FIXME! handle errors in options somehow?
for ( const auto& t : m_dNodes[dArgs[4]].m_pMapArg->m_dPairs )
{
if ( t.m_sKey=="in" )
{
if ( t.m_sValue=="deg" || t.m_sValue=="degrees" )
bDeg = true;
else if ( t.m_sValue=="rad" || t.m_sValue=="radians" )
bDeg = false;
} else if ( t.m_sKey=="out" )
{
float fCoeff = 1.0f;
if ( sphGeoDistanceUnit ( t.m_sValue.cstr(), fCoeff ) )
fOut = 1.0f / fCoeff;
} else if ( t.m_sKey=="method" )
{
if ( t.m_sValue=="haversine" )
eMethod = GEO_HAVERSINE;
else if ( t.m_sValue=="adaptive" )
eMethod = GEO_ADAPTIVE;
}
}
}
bool bConst1 = ( IsConst ( &m_dNodes[dArgs[0]] ) && IsConst ( &m_dNodes[dArgs[1]] ) );
bool bConst2 = ( IsConst ( &m_dNodes[dArgs[2]] ) && IsConst ( &m_dNodes[dArgs[3]] ) );
if ( bConst1 && bConst2 )
{
float t[4];
for ( int i=0; i<4; i++ )
t[i] = FloatVal ( &m_dNodes[dArgs[i]] );
return new Expr_GetConst_c ( fOut*CalcGeodist ( eMethod, bDeg, t[0], t[1], t[2], t[3] ) );
}
if ( bConst1 )
{
Swap ( dArgs[0], dArgs[2] );
Swap ( dArgs[1], dArgs[3] );
Swap ( bConst1, bConst2 );
}
if ( bConst2 )
{
CSphString sLat = GetNameByLocator(dArgs[0]);
CSphString sLon = GetNameByLocator(dArgs[1]);
// constant anchor
if ( m_dNodes[dArgs[0]].m_iToken==TOK_ATTR_FLOAT && m_dNodes[dArgs[1]].m_iToken==TOK_ATTR_FLOAT )
{
// attr point
return new Expr_GeodistAttrConst_c ( GetGeodistFn ( eMethod, bDeg ), fOut,
m_dNodes[dArgs[0]].m_tLocator, m_dNodes[dArgs[1]].m_tLocator,
FloatVal ( &m_dNodes[dArgs[2]] ), FloatVal ( &m_dNodes[dArgs[3]] ),
sLat, sLon );
} else
{
CSphRefcountedPtr<ISphExpr> pAttr0 { ConvertExprJson ( CreateTree ( dArgs[0] ) ) };
CSphRefcountedPtr<ISphExpr> pAttr1 { ConvertExprJson ( CreateTree ( dArgs[1] ) ) };
// expr point
return new Expr_GeodistConst_c ( GetGeodistFn ( eMethod, bDeg ), fOut,
pAttr0, pAttr1,
FloatVal ( &m_dNodes[dArgs[2]] ), FloatVal ( &m_dNodes[dArgs[3]] ),
sLat, sLon );
}
}
// four expressions
VecRefPtrs_t<ISphExpr*> dExpr;
MoveToArgList ( CreateTree ( iArgs ), dExpr );
assert ( dExpr.GetLength()==4 );
ConvertArgsJson ( dExpr );
return new Expr_Geodist_c ( GetGeodistFn ( eMethod, bDeg ), fOut, dExpr[0], dExpr[1], dExpr[2], dExpr[3] );
}
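/// PACKEDFACTORS ( [{no_atc=1, json=1}] ); forces final-stage evaluation and
/// records the requested factor flags (ATC calculation on/off, JSON output)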
ISphExpr * ExprParser_t::CreatePFNode ( int iArg )
{
m_eEvalStage = SPH_EVAL_FINAL;
DWORD uNodeFactorFlags = SPH_FACTOR_ENABLE | SPH_FACTOR_CALC_ATC;
CSphVector<int> dArgs = GatherArgNodes ( iArg );
assert ( dArgs.GetLength()==0 || dArgs.GetLength()==1 );
bool bNoATC = false;
bool bJsonOut = false;
if ( dArgs.GetLength()==1 )
{
assert ( m_dNodes[dArgs[0]].m_eRetType==SPH_ATTR_MAPARG );
for ( const auto& dOpt : m_dNodes[dArgs[0]].m_pMapArg->m_dPairs )
{
if ( dOpt.m_sKey=="no_atc" && dOpt.m_iValue>0)
bNoATC = true;
else if ( dOpt.m_sKey=="json" && dOpt.m_iValue>0 )
bJsonOut = true;
}
}
if ( bNoATC )
uNodeFactorFlags &= ~SPH_FACTOR_CALC_ATC;
if ( bJsonOut )
uNodeFactorFlags |= SPH_FACTOR_JSON_OUT;
m_uPackedFactorFlags |= uNodeFactorFlags;
return new Expr_GetPackedFactors_c();
}
ISphExpr * ExprParser_t::CreateBitdotNode ( int iArgsNode, CSphVector<ISphExpr *> & dArgs )
{
assert ( dArgs.GetLength()>=1 );
ESphAttr eAttrType = m_dNodes[iArgsNode].m_eRetType;
switch ( eAttrType )
{
case SPH_ATTR_INTEGER: return new Expr_Bitdot_c<int> ( dArgs );
case SPH_ATTR_BIGINT: return new Expr_Bitdot_c<int64_t> ( dArgs );
default: return new Expr_Bitdot_c<float> ( dArgs );
}
}
ISphExpr * ExprParser_t::CreateAggregateNode ( const ExprNode_t & tNode, ESphAggrFunc eFunc, ISphExpr * pLeft )
{
const ExprNode_t & tLeft = m_dNodes [ tNode.m_iLeft ];
switch ( tLeft.m_iToken )
{
case TOK_ATTR_JSON: return new Expr_JsonFieldAggr_c ( pLeft, eFunc );
case TOK_ATTR_MVA32: return new Expr_MVAAggr_c<DWORD> ( tLeft.m_tLocator, GetNameByLocator(tLeft), eFunc );
case TOK_ATTR_MVA64: return new Expr_MVAAggr_c<int64_t> ( tLeft.m_tLocator, GetNameByLocator(tLeft), eFunc );
case TOK_COLUMNAR_UINT32SET:return CreateExpr_ColumnarMva32Aggr ( pLeft, eFunc );
case TOK_COLUMNAR_INT64SET: return CreateExpr_ColumnarMva64Aggr ( pLeft, eFunc );
default: return nullptr;
}
}
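/// recursively rebind identifier nodes named sKey to the iterator value pAttr
/// (used by ALL/ANY/INDEXOF to wire the loop variable into the body expression)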
void ExprParser_t::FixupIterators ( int iNode, const char * sKey, SphAttr_t * pAttr )
{
if ( iNode==-1 )
return;
ExprNode_t & tNode = m_dNodes[iNode];
if ( tNode.m_iToken==TOK_IDENT && !strcmp ( sKey, tNode.m_sIdent ) )
{
tNode.m_iToken = TOK_ITERATOR;
tNode.m_pAttr = pAttr;
}
FixupIterators ( tNode.m_iLeft, sKey, pAttr );
FixupIterators ( tNode.m_iRight, sKey, pAttr );
}
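/// ALL/ANY/INDEXOF ( expr FOR ident IN json.array ); builds the data expression,
/// binds the loop identifier inside expr to the iterator, then attaches expr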
ISphExpr * ExprParser_t::CreateForInNode ( int iNode )
{
ExprNode_t & tNode = m_dNodes[iNode];
int iFunc = tNode.m_iFunc;
int iExprNode = tNode.m_iLeft;
int iNameNode = tNode.m_iRight;
int iDataNode = m_dNodes[iNameNode].m_iLeft;
auto * pFunc = new Expr_ForIn_c ( CSphRefcountedPtr<ISphExpr> { CreateTree ( iDataNode )} , iFunc==FUNC_ALL, iFunc==FUNC_INDEXOF );
FixupIterators ( iExprNode, m_dNodes[iNameNode].m_sIdent, pFunc->GetRef() );
pFunc->SetExpr ( CSphRefcountedPtr<ISphExpr> { CreateTree ( iExprNode ) } );
return pFunc;
}
ISphExpr * ExprParser_t::CreateExprDateAdd ( int iNode, bool bAdd )
{
ExprNode_t & tNode = m_dNodes[iNode];
int iExprNode1 = tNode.m_iLeft;
int iExprNode2 = tNode.m_iRight;
auto pExpr1 = CreateTree ( iExprNode1 );
auto pExpr2 = CreateTree ( iExprNode2 );
// This is a hack a la REMAP: the time unit constant is stashed in the node right before this one
int iUnit = m_dNodes [ iNode-1 ].m_iConst;
return ::CreateExprDateAdd ( pExpr1, pExpr2, TimeUnit_e(iUnit), bAdd );
}
ISphExpr * ExprParser_t::CreateRegexNode ( ISphExpr * pAttr, ISphExpr * pString )
{
auto pExpr = new Expr_Regex_c ( pAttr, pString );
#if WITH_RE2
auto* pRe2 = pExpr->GetRE2();
if ( !pRe2->ok() )
{
m_sCreateError.SetSprintf ( "RE2: error parsing '%s': %s", pRe2->pattern().c_str(), pRe2->error().c_str() );
SafeDelete ( pExpr );
}
#endif
return pExpr;
}
ISphExpr * ExprParser_t::CreateConcatNode ( int iArgsNode, CSphVector<ISphExpr *> & dArgs )
{
for ( auto & i : dArgs )
if ( !CheckStoredArg(i) )
return nullptr;
CSphVector<ESphAttr> dTypes;
GatherArgRetTypes ( iArgsNode, dTypes );
ARRAY_FOREACH ( i, dTypes )
if ( dTypes[i]!=SPH_ATTR_STRING && dTypes[i]!=SPH_ATTR_STRINGPTR )
{
m_sCreateError.SetSprintf ( "all CONCAT() arguments must be strings (arg %d is not)", i+1 );
return nullptr;
}
CSphVector<bool> dConstStr;
GatherArgFN ( iArgsNode, [this, &dConstStr] (int i) { dConstStr.Add ( m_dNodes[i].m_iToken==TOK_CONST_STRING );});
return new Expr_Concat_c ( dArgs, dConstStr );
}
//////////////////////////////////////////////////////////////////////////
#define YY_DECL inline int yy1lex ( YYSTYPE * lvalp, void * yyscanner, ExprParser_t * pParser )
#include "flexsphinxexpr.c"
#ifndef NDEBUG
// use a proxy so that it is possible to debug inside yylex
inline int yylex ( YYSTYPE * lvalp, ExprParser_t * pParser )
{
int res = yy1lex ( lvalp, pParser->m_pScanner, pParser );
return res;
}
#else
inline int yylex ( YYSTYPE * lvalp, ExprParser_t * pParser )
{
return yy1lex ( lvalp, pParser->m_pScanner, pParser );
}
#endif
void yyerror ( ExprParser_t * pParser, const char * sMessage )
{
// flex puts a zero at the last token boundary; undo that
const auto* szToken = yy1lex_unhold ( pParser->m_pScanner );
pParser->m_sParserError.SetSprintf ( "P09: %s near '%s'", sMessage, szToken );
}
#include "bissphinxexpr.c"
//////////////////////////////////////////////////////////////////////////
ExprParser_t::~ExprParser_t ()
{
// i kinda own those things
ARRAY_FOREACH ( i, m_dNodes )
{
if ( m_dNodes[i].m_iToken==TOK_CONST_LIST )
SafeDelete ( m_dNodes[i].m_pConsts );
if ( m_dNodes[i].m_iToken==TOK_MAP_ARG )
SafeDelete ( m_dNodes[i].m_pMapArg );
}
// free any UDF calls that weren't taken over
ARRAY_FOREACH ( i, m_dUdfCalls )
SafeDelete ( m_dUdfCalls[i] );
// free temp map arguments storage
ARRAY_FOREACH ( i, m_dIdents )
SafeDeleteArray ( m_dIdents[i] );
}
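/// deduce the widest return type for a binary operation:
/// int op int -> int, int-like op int-like -> bigint,
/// json vs numeric -> the numeric side (autoconversion), anything else -> float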
ESphAttr ExprParser_t::GetWidestRet ( int iLeft, int iRight )
{
ESphAttr uLeftType = ( iLeft<0 ) ? SPH_ATTR_INTEGER : m_dNodes[iLeft].m_eRetType;
ESphAttr uRightType = ( iRight<0 ) ? SPH_ATTR_INTEGER : m_dNodes[iRight].m_eRetType;
if ( uLeftType==SPH_ATTR_INTEGER && uRightType==SPH_ATTR_INTEGER )
return SPH_ATTR_INTEGER;
if ( IsInt ( uLeftType ) && IsInt ( uRightType ) )
return SPH_ATTR_BIGINT;
// if json vs numeric then return numeric type (for the autoconversion)
if ( uLeftType==SPH_ATTR_JSON_FIELD && IsNumeric ( uRightType ) )
return uRightType;
if ( uRightType==SPH_ATTR_JSON_FIELD && IsNumeric ( uLeftType ) )
return uLeftType;
return SPH_ATTR_FLOAT;
}
int ExprParser_t::AddNodeInt ( int64_t iValue )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_CONST_INT;
tNode.m_eRetType = GetIntType ( iValue );
tNode.m_iConst = iValue;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeFloat ( float fValue )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_CONST_FLOAT;
tNode.m_eRetType = SPH_ATTR_FLOAT;
tNode.m_fConst = fValue;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeString ( int64_t iValue )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_CONST_STRING;
tNode.m_eRetType = SPH_ATTR_STRING;
tNode.m_iConst = iValue;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeAttr ( int iTokenType, uint64_t uAttrLocator )
{
assert ( iTokenType==TOK_ATTR_INT || iTokenType==TOK_ATTR_BITS || iTokenType==TOK_ATTR_FLOAT
|| iTokenType==TOK_ATTR_MVA32 || iTokenType==TOK_ATTR_MVA64 || iTokenType==TOK_ATTR_STRING
|| iTokenType==TOK_ATTR_FACTORS || iTokenType==TOK_ATTR_JSON );
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = iTokenType;
sphUnpackAttrLocator ( uAttrLocator, &tNode );
bool bPtrAttr = tNode.m_tLocator.m_iBlobAttrId<0;
switch ( iTokenType )
{
case TOK_ATTR_FLOAT: tNode.m_eRetType = SPH_ATTR_FLOAT; break;
case TOK_ATTR_MVA32: tNode.m_eRetType = bPtrAttr ? SPH_ATTR_UINT32SET_PTR : SPH_ATTR_UINT32SET; break;
case TOK_ATTR_MVA64: tNode.m_eRetType = bPtrAttr ? SPH_ATTR_INT64SET_PTR : SPH_ATTR_INT64SET; break;
case TOK_ATTR_STRING: tNode.m_eRetType = SPH_ATTR_STRING; break;
case TOK_ATTR_FACTORS: tNode.m_eRetType = SPH_ATTR_FACTORS; break;
case TOK_ATTR_JSON: tNode.m_eRetType = SPH_ATTR_JSON_FIELD; break;
default:
tNode.m_eRetType = ( tNode.m_tLocator.m_iBitCount>32 ) ? SPH_ATTR_BIGINT : SPH_ATTR_INTEGER;
}
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeColumnar ( int iTokenType, uint64_t uAttrLocator )
{
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = iTokenType;
tNode.m_iLocator = (int)uAttrLocator;
switch ( iTokenType )
{
case TOK_COLUMNAR_INT: tNode.m_eRetType = SPH_ATTR_INTEGER; break;
case TOK_COLUMNAR_BOOL: tNode.m_eRetType = SPH_ATTR_BOOL; break;
case TOK_COLUMNAR_TIMESTAMP: tNode.m_eRetType = SPH_ATTR_TIMESTAMP; break;
case TOK_COLUMNAR_FLOAT: tNode.m_eRetType = SPH_ATTR_FLOAT; break;
case TOK_COLUMNAR_BIGINT: tNode.m_eRetType = SPH_ATTR_BIGINT; break;
case TOK_COLUMNAR_STRING: tNode.m_eRetType = SPH_ATTR_STRINGPTR; break;
case TOK_COLUMNAR_UINT32SET: tNode.m_eRetType = SPH_ATTR_UINT32SET_PTR; break;
case TOK_COLUMNAR_INT64SET: tNode.m_eRetType = SPH_ATTR_INT64SET_PTR; break;
case TOK_COLUMNAR_FLOATVEC: tNode.m_eRetType = SPH_ATTR_FLOAT_VECTOR_PTR; break;
default:
assert ( 0 && "Unsupported columnar type" );
break;
}
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeField ( int iTokenType, uint64_t uAttrLocator )
{
assert ( iTokenType==TOK_FIELD );
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = iTokenType;
tNode.m_iLocator = (int)uAttrLocator;
tNode.m_eRetType = SPH_ATTR_STRINGPTR;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeWeight ()
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_WEIGHT;
tNode.m_eRetType = SPH_ATTR_BIGINT;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeOp ( int iOp, int iLeft, int iRight )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = iOp;
// deduce type
tNode.m_eRetType = SPH_ATTR_FLOAT; // default to float
switch (iOp)
{
case TOK_NEG: // NEG just inherits the type
tNode.m_eArgType = m_dNodes[iLeft].m_eRetType;
tNode.m_eRetType = tNode.m_eArgType;
break;
case TOK_NOT: // NOT result is integer, and its argument must be integer
tNode.m_eArgType = m_dNodes[iLeft].m_eRetType;
tNode.m_eRetType = SPH_ATTR_INTEGER;
if ( !IsInt ( tNode.m_eArgType ) )
{
m_sParserError.SetSprintf ( "NOT argument must be integer" );
return -1;
}
break;
case '&': case '|':
tNode.m_eArgType = GetWidestRet ( iLeft, iRight );
tNode.m_eRetType = tNode.m_eArgType;
if ( !IsInt ( tNode.m_eArgType ) ) // bitwise AND/OR can only be over ints
{
m_sParserError.SetSprintf ( "%s arguments must be integer", ( iOp=='&' ) ? "&" : "|" );
return -1;
}
break;
case TOK_LTE: case TOK_GTE: case TOK_EQ: case TOK_NE:
case '<': case '>': case TOK_IS_NULL: case TOK_IS_NOT_NULL:
tNode.m_eArgType = GetWidestRet ( iLeft, iRight );
tNode.m_eRetType = SPH_ATTR_INTEGER;
break;
case TOK_AND: case TOK_OR:
tNode.m_eArgType = GetWidestRet ( iLeft, iRight );
tNode.m_eRetType = SPH_ATTR_INTEGER;
if ( !IsInt ( tNode.m_eArgType )) // logical AND/OR can only be over ints
{
m_sParserError.SetSprintf ( "%s arguments must be integer", ( iOp==TOK_AND ) ? "AND" : "OR" );
return -1;
}
break;
case '+': case '-': case '*': case ',':
tNode.m_eArgType = GetWidestRet ( iLeft, iRight );
tNode.m_eRetType = tNode.m_eArgType;
break;
case '%':
tNode.m_eArgType = GetWidestRet ( iLeft, iRight );
tNode.m_eRetType = tNode.m_eArgType;
// MOD can only be over ints
if ( !IsInt ( tNode.m_eArgType ) )
{
m_sParserError.SetSprintf ( "MOD arguments must be integer" );
return -1;
}
break;
default:
// check for unknown op
assert ( iOp=='/' && "unknown op in AddNodeOp() type deducer" );
}
tNode.m_iArgs = 0;
if ( iOp==',' )
{
if ( iLeft>=0 ) tNode.m_iArgs += ( m_dNodes[iLeft].m_iToken==',' ) ? m_dNodes[iLeft].m_iArgs : 1;
if ( iRight>=0 ) tNode.m_iArgs += ( m_dNodes[iRight].m_iToken==',' ) ? m_dNodes[iRight].m_iArgs : 1;
}
// argument type conversion for functions like INDEXOF(), ALL() and ANY()
// we need no conversion for operands of comma!
if ( iOp!=',' && iLeft>=0 && iRight>=0 )
{
if ( m_dNodes[iRight].m_eRetType==SPH_ATTR_STRING && m_dNodes[iLeft].m_iToken==TOK_IDENT )
m_dNodes[iLeft].m_eRetType = SPH_ATTR_STRING;
else if ( m_dNodes[iLeft].m_eRetType==SPH_ATTR_STRING && m_dNodes[iRight].m_iToken==TOK_IDENT )
m_dNodes[iRight].m_eRetType = SPH_ATTR_STRING;
}
tNode.m_iLeft = iLeft;
tNode.m_iRight = iRight;
return m_dNodes.GetLength()-1;
}
// functions without args
int ExprParser_t::AddNodeFunc0 ( int iFunc )
{
// regular case, iFirst is entire arglist, iSecond is -1
// special case for IN(), iFirst is arg, iSecond is constlist
// special case for REMAP(), iFirst and iSecond are expressions, iThird and iFourth are constlists
assert ( iFunc>=0 && iFunc<int ( sizeof ( g_dFuncs ) / sizeof ( g_dFuncs[0] ) ) );
// assert ( g_dFuncs[iFunc].m_eFunc==(Tokh_e ) iFunc );
// check args count
int iExpectedArgc = g_dFuncs[iFunc].m_iArgs;
if ( iExpectedArgc )
{
m_sParserError.SetSprintf ( "%s() called without args, %d args expected", FuncNameByHash(iFunc), iExpectedArgc );
return -1;
}
// do add
ExprNode_t &tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = iFunc;
tNode.m_iLeft = -1;
tNode.m_iRight = -1;
tNode.m_eArgType = SPH_ATTR_INTEGER;
tNode.m_eRetType = g_dFuncs[iFunc].m_eRet;
// all ok
assert ( tNode.m_eRetType!=SPH_ATTR_NONE );
return m_dNodes.GetLength () - 1;
}
// functions with args (iArg is the root of the whole arglist)
int ExprParser_t::AddNodeFunc ( int iFunc, int iArg )
{
// regular case, iFirst is entire arglist, iSecond is -1
// special case for IN(), iFirst is arg, iSecond is constlist
// special case for REMAP(), iFirst and iSecond are expressions, iThird and iFourth are constlists
assert ( iFunc>=0 && iFunc< int ( sizeof ( g_dFuncs )/sizeof ( g_dFuncs[0]) ) );
auto eFunc = (Tokh_e)iFunc;
// assert ( g_dFuncs [ iFunc ].m_eFunc==eFunc );
const char * sFuncName = FuncNameByHash ( iFunc );
// check args count
int iExpectedArgc = g_dFuncs [ iFunc ].m_iArgs;
int iArgc = 0;
if ( iArg>=0 )
iArgc = ( m_dNodes [ iArg ].m_iToken==',' ) ? m_dNodes [ iArg ].m_iArgs : 1;
if ( iExpectedArgc<0 )
{
if ( iArgc < -iExpectedArgc ) // space placed to avoid confusing ligature <-
{
m_sParserError.SetSprintf ( "%s() called with %d args, at least %d args expected", sFuncName, iArgc, -iExpectedArgc );
return -1;
}
} else if ( iArgc!=iExpectedArgc )
{
m_sParserError.SetSprintf ( "%s() called with %d args, %d args expected", sFuncName, iArgc, iExpectedArgc );
return -1;
}
// check arg types
//
// check for string args
// most builtin functions take numeric args only
bool bGotString = false, bGotMva = false;
CSphVector<ESphAttr> dRetTypes;
GatherArgRetTypes ( iArg, dRetTypes );
for ( ESphAttr eRetType: dRetTypes )
{
switch ( eRetType )
{
case SPH_ATTR_UINT32SET: case SPH_ATTR_INT64SET: case SPH_ATTR_UINT32SET_PTR: case SPH_ATTR_INT64SET_PTR: bGotMva = true; break;
case SPH_ATTR_STRING : bGotString = true;
default:;
}
}
if ( bGotString )
{
switch ( eFunc )
{
default: m_sParserError.SetSprintf ( "%s() arguments can not be string", sFuncName ); return -1;
case FUNC_LENGTH: case FUNC_TO_STRING: case FUNC_CONCAT: case FUNC_SUBSTRING_INDEX: case FUNC_UPPER: case FUNC_LOWER: case FUNC_CRC32:
case FUNC_EXIST: case FUNC_POLY2D: case FUNC_GEOPOLY2D: case FUNC_REGEX: case FUNC_LEVENSHTEIN: case FUNC_DATE_FORMAT: case FUNC_BIGINT:
case FUNC_DAY: case FUNC_MONTH: case FUNC_YEAR: case FUNC_YEARMONTH: case FUNC_YEARMONTHDAY:
case FUNC_SECOND: case FUNC_MINUTE: case FUNC_HOUR:
break;
}
}
if ( bGotMva )
{
switch ( eFunc )
{
default: m_sParserError.SetSprintf ( "%s() arguments can not be MVA", sFuncName ); return -1;
case FUNC_TO_STRING: case FUNC_LENGTH: case FUNC_LEAST: case FUNC_GREATEST:;
}
}
auto & dArg = m_dNodes[iArg];
switch ( eFunc )
{
case FUNC_BITDOT:
{ // check that first BITDOT arg is integer or bigint
int iLeftmost = iArg;
while ( m_dNodes[iLeftmost].m_iToken==',' )
iLeftmost = m_dNodes[iLeftmost].m_iLeft;
ESphAttr eArg = m_dNodes[iLeftmost].m_eRetType;
if ( !IsInt ( eArg ) )
{
m_sParserError.SetSprintf ( "first %s() argument must be integer", sFuncName );
return -1;
}
}
break;
case FUNC_EXIST:
{
ESphAttr eLeft = m_dNodes[dArg.m_iLeft].m_eRetType, eRight = m_dNodes[dArg.m_iRight].m_eRetType;
bool bIsLeftGood = ( eLeft==SPH_ATTR_STRING );
bool bIsRightGood = ( eRight==SPH_ATTR_INTEGER || eRight==SPH_ATTR_TIMESTAMP || eRight==SPH_ATTR_BOOL || eRight==SPH_ATTR_FLOAT || eRight==SPH_ATTR_BIGINT );
if ( !bIsLeftGood || !bIsRightGood )
{
if ( bIsRightGood )
m_sParserError.SetSprintf ( "first %s() argument must be string", sFuncName );
else
m_sParserError.SetSprintf ( "ill-formed %s", sFuncName );
return -1;
}
}
break;
case FUNC_SINT: // check that first SINT or timestamp family arg is integer or timestamp
case FUNC_FIBONACCI:
case FUNC_DAY:
case FUNC_MONTH:
case FUNC_YEAR:
case FUNC_YEARMONTH:
case FUNC_YEARMONTHDAY:
case FUNC_HOUR:
case FUNC_MINUTE:
case FUNC_SECOND:
assert ( iArg >= 0 );
if ( !( dArg.m_eRetType==SPH_ATTR_INTEGER
|| dArg.m_eRetType==SPH_ATTR_TIMESTAMP
|| dArg.m_eRetType==SPH_ATTR_BIGINT
|| dArg.m_eRetType==SPH_ATTR_JSON_FIELD
|| dArg.m_eRetType==SPH_ATTR_STRING
|| CanEvalNumbers ( dArg.m_iFunc ) ) )
{
m_sParserError.SetSprintf ( "%s() argument must be integer, bigint, timestamp, json, string or evaluated to number", sFuncName );
return -1;
}
break;
case FUNC_BIGINT:
assert ( iArg >= 0 );
if ( !( dArg.m_eRetType == SPH_ATTR_JSON_FIELD
|| IsFloatLike ( dArg.m_eRetType )
|| IsNumericLike ( dArg.m_eRetType )
|| CanEvalNumbers ( dArg.m_iFunc ) ) )
{
m_sParserError.SetSprintf ( "%s() argument must be number, or evaluated to number", sFuncName );
return -1;
}
break;
case FUNC_CONTAINS: // check that CONTAINS args are poly, float, float
assert ( dRetTypes.GetLength ()==3 );
if ( dRetTypes[0]!=SPH_ATTR_POLY2D )
{
m_sParserError.SetSprintf ( "1st CONTAINS() argument must be a 2D polygon (see POLY2D)" );
return -1;
}
if ( !( IsNumeric ( dRetTypes[1] ) || IsJson ( dRetTypes[1] ) ) || ! ( IsNumeric ( dRetTypes[2] ) || IsJson ( dRetTypes[2] ) ) )
{
m_sParserError.SetSprintf ( "2nd and 3rd CONTAINS() arguments must be numeric or JSON" );
return -1;
}
break;
case FUNC_POLY2D:
case FUNC_GEOPOLY2D:
if ( dRetTypes.GetLength ()==1 )
{
// handle 1 arg version, POLY2D(string-attr)
if ( dRetTypes[0]!=SPH_ATTR_STRING && dRetTypes[0]!=SPH_ATTR_STRINGPTR && dRetTypes[0]!=SPH_ATTR_JSON_FIELD )
{
m_sParserError.SetSprintf ( "%s() argument must be a string or JSON field attribute", sFuncName );
return -1;
}
} else if ( dRetTypes.GetLength ()<6 )
{
// handle 2..5 arg versions, invalid
m_sParserError.SetSprintf ( "bad %s() argument count, must be either 1 (string) or 6+ (x/y pairs list)"
, sFuncName );
return -1;
} else
{
// handle 6+ arg version, POLY2D(xy-list)
if ( dRetTypes.GetLength () & 1 )
{
m_sParserError.SetSprintf ( "bad %s() argument count, must be even", sFuncName );
return -1;
}
ARRAY_FOREACH ( i, dRetTypes )
if ( !( IsNumeric ( dRetTypes[i] ) || IsJson ( dRetTypes[i] ) ) )
{
m_sParserError.SetSprintf ( "%s() argument %d must be numeric or JSON field", sFuncName, 1 + i );
return -1;
}
}
break;
case FUNC_BM25F: // check that BM25F args are float, float [, {file_name=weight}]
if ( dRetTypes.GetLength ()>3 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, at most 3 args expected", sFuncName
, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_FLOAT || dRetTypes[1]!=SPH_ATTR_FLOAT )
{
m_sParserError.SetSprintf ( "%s() arguments 1,2 must be numeric", sFuncName );
return -1;
}
if ( dRetTypes.GetLength ()==3 && dRetTypes[2]!=SPH_ATTR_MAPARG )
{
m_sParserError.SetSprintf ( "%s() argument 3 must be map", sFuncName );
return -1;
}
break;
case FUNC_SUBSTRING_INDEX:
if ( dRetTypes.GetLength()!=3 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, but 3 args expected", sFuncName
, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_STRING && dRetTypes[0]!=SPH_ATTR_STRINGPTR && dRetTypes[0]!=SPH_ATTR_JSON && dRetTypes[0]!=SPH_ATTR_JSON_FIELD )
{
m_sParserError.SetSprintf ( "%s() argument 1 must be string or json", sFuncName );
return -1;
}
if ( dRetTypes[1]!=SPH_ATTR_STRING )
{
m_sParserError.SetSprintf ( "%s() arguments 2 must be string", sFuncName );
return -1;
}
if ( dRetTypes[2]!=SPH_ATTR_INTEGER )
{
m_sParserError.SetSprintf ( "%s() arguments 3 must be numeric", sFuncName );
return -1;
}
break;
case FUNC_UPPER:
case FUNC_LOWER:
if ( dRetTypes.GetLength()!=1 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, but 1 arg expected", sFuncName, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_STRING && dRetTypes[0]!=SPH_ATTR_STRINGPTR && dRetTypes[0]!=SPH_ATTR_JSON && dRetTypes[0]!=SPH_ATTR_JSON_FIELD )
{
m_sParserError.SetSprintf ( "%s() argument 1 must be string or json", sFuncName );
return -1;
}
break;
case FUNC_GEODIST: // check GEODIST args count, and that optional arg 5 is a map argument
if ( dRetTypes.GetLength ()>5 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, at most 5 args expected", sFuncName
, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes.GetLength ()==5 && dRetTypes[4]!=SPH_ATTR_MAPARG )
{
m_sParserError.SetSprintf ( "%s() argument 5 must be map", sFuncName );
return -1;
}
break;
case FUNC_REGEX:
{
#if WITH_RE2
ESphAttr eLeft = m_dNodes[dArg.m_iLeft].m_eRetType;
bool bIsLeftGood = ( eLeft==SPH_ATTR_STRING || eLeft==SPH_ATTR_STRINGPTR || eLeft==SPH_ATTR_JSON_FIELD );
if ( !bIsLeftGood )
{
m_sParserError.SetSprintf ( "first %s() argument must be string or JSON.field", sFuncName );
return -1;
}
ESphAttr eRight = m_dNodes[dArg.m_iRight].m_eRetType;
bool bIsRightGood = ( eRight==SPH_ATTR_STRING );
if ( !bIsRightGood )
{
m_sParserError.SetSprintf ( "second %s() argument must be string", sFuncName );
return -1;
}
#else
m_sParserError.SetSprintf ( "%s() used but no regexp support compiled", sFuncName );
return -1;
#endif
}
break;
case FUNC_DATE_FORMAT:
if ( dRetTypes.GetLength()!=2 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, but 2 args expected", sFuncName, dRetTypes.GetLength() );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_INTEGER && dRetTypes[0]!=SPH_ATTR_TIMESTAMP && dRetTypes[0]!=SPH_ATTR_BIGINT && dRetTypes[0]!=SPH_ATTR_JSON_FIELD && dRetTypes[0]!=SPH_ATTR_STRING )
{
m_sParserError.SetSprintf ( "%s() argument 1 must be integer, bigint, timestamp, json or string", sFuncName );
return -1;
}
if ( dRetTypes[1]!=SPH_ATTR_STRING )
{
m_sParserError.SetSprintf ( "%s() arguments 2 must be string", sFuncName );
return -1;
}
break;
default:;
}
// do add
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = iFunc;
tNode.m_iLeft = iArg;
tNode.m_iRight = -1;
tNode.m_eArgType = ( iArg>=0 ) ? m_dNodes [ iArg ].m_eRetType : SPH_ATTR_INTEGER;
tNode.m_eRetType = g_dFuncs [ iFunc ].m_eRet;
// fixup return type in a few special cases
switch ( eFunc )
{
case FUNC_MIN:
case FUNC_MAX:
case FUNC_MADD:
case FUNC_MUL3:
case FUNC_ABS:
case FUNC_IDIV:
if ( IsJson ( tNode.m_eArgType ) ) // auto-converter from JSON field for universal (SPH_ATTR_NONE return type) nodes
tNode.m_eRetType = SPH_ATTR_BIGINT;
else
tNode.m_eRetType = tNode.m_eArgType;
break;
case FUNC_EXIST:
{
ESphAttr eType = m_dNodes[m_dNodes[iArg].m_iRight].m_eRetType;
tNode.m_eArgType = eType;
tNode.m_eRetType = eType;
break;
}
case FUNC_BIGINT:
if ( tNode.m_eArgType==SPH_ATTR_FLOAT )
tNode.m_eRetType = SPH_ATTR_FLOAT; // enforce if we can; FIXME! silently ignores BIGINT() on floats; should warn or raise an error
break;
case FUNC_IF:
case FUNC_BITDOT:
tNode.m_eRetType = tNode.m_eArgType;
break;
case FUNC_GREATEST:
case FUNC_LEAST: // fixup MVA return type according to the leftmost argument
{
int iLeftmost = iArg;
while ( m_dNodes [ iLeftmost ].m_iToken==',' )
iLeftmost = m_dNodes [ iLeftmost ].m_iLeft;
ESphAttr eArg = m_dNodes [ iLeftmost ].m_eRetType;
if ( eArg==SPH_ATTR_INT64SET || eArg==SPH_ATTR_INT64SET_PTR )
tNode.m_eRetType = SPH_ATTR_BIGINT;
if ( eArg==SPH_ATTR_UINT32SET || eArg==SPH_ATTR_UINT32SET_PTR )
tNode.m_eRetType = SPH_ATTR_INTEGER;
}
break;
case FUNC_LEVENSHTEIN:
{
if ( dRetTypes.GetLength()<2 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, but at least 2 args expected", sFuncName, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_STRING && dRetTypes[0]!=SPH_ATTR_STRINGPTR && dRetTypes[0]!=SPH_ATTR_JSON_FIELD )
{
m_sParserError.SetSprintf ( "%s() arguments 1 must be string", sFuncName );
return -1;
}
if ( dRetTypes[1]!=SPH_ATTR_STRING && dRetTypes[1]!=SPH_ATTR_STRINGPTR && dRetTypes[1]!=SPH_ATTR_JSON_FIELD )
{
m_sParserError.SetSprintf ( "%s() arguments 2 must be string", sFuncName );
return -1;
}
LevenshteinOptions_t tOpts;
if ( dRetTypes.GetLength()>2 )
{
if ( dRetTypes[2]!=SPH_ATTR_MAPARG )
{
m_sParserError.SetSprintf ( "%s() arguments 3 must be a map", sFuncName );
return -1;
}
CSphVector<int> dArgs = GatherArgNodes ( iArg );
assert ( dArgs.GetLength()==dRetTypes.GetLength() );
const CSphVector<CSphNamedVariant> & dOpts = m_dNodes[dArgs[2]].m_pMapArg->m_dPairs;
tOpts = GetOptions ( dOpts.Begin(), dOpts.GetLength() );
}
tNode.m_eRetType = ( tOpts.m_bNormalize ? SPH_ATTR_FLOAT : SPH_ATTR_INTEGER );
break;
}
case FUNC_RANGE:
case FUNC_DATE_RANGE:
{
if ( dRetTypes.GetLength()<2 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, but at least 2 args expected", sFuncName, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_INTEGER && dRetTypes[0]!=SPH_ATTR_BIGINT && dRetTypes[0]!=SPH_ATTR_FLOAT && dRetTypes[0]!=SPH_ATTR_TIMESTAMP )
{
m_sParserError.SetSprintf ( "%s() argument 1 must be number, or evaluated to number", sFuncName );
return -1;
}
GatherArgFN ( iArg, [&] ( int i )
{
if ( i!=0 && m_dNodes[i].m_eRetType!=SPH_ATTR_MAPARG )
m_sParserError.SetSprintf ( "%s() argument %d must be a map", sFuncName, i+1 );
} );
}
break;
case FUNC_HISTOGRAM:
case FUNC_DATE_HISTOGRAM:
{
if ( dRetTypes.GetLength()!=2 )
{
m_sParserError.SetSprintf ( "%s() called with %d args, but at 2 args expected", sFuncName, dRetTypes.GetLength () );
return -1;
}
if ( dRetTypes[0]!=SPH_ATTR_INTEGER && dRetTypes[0]!=SPH_ATTR_BIGINT && dRetTypes[0]!=SPH_ATTR_FLOAT && dRetTypes[0]!=SPH_ATTR_TIMESTAMP )
{
m_sParserError.SetSprintf ( "%s() argument 1 must be number, or evaluated to number", sFuncName );
return -1;
}
if ( dRetTypes[1]!=SPH_ATTR_MAPARG )
{
m_sParserError.SetSprintf ( "%s() argument 2 must be map", sFuncName );
return -1;
}
}
break;
default:;
}
// all ok
assert ( tNode.m_eRetType!=SPH_ATTR_NONE );
return m_dNodes.GetLength()-1;
}
// special branch for all/any/indexof ( expr for x in arglist )
int ExprParser_t::AddNodeFor ( int iFunc, int iExpr, int iLoop )
{
assert ( iFunc>=0 && iFunc<int ( sizeof ( g_dFuncs ) / sizeof ( g_dFuncs[0] ) ) );
// assert ( g_dFuncs [ iFunc ].m_eFunc==eFunc );
const char * sFuncName = FuncNameByHash ( iFunc );
// check args count
if ( iLoop<0 )
{
int iArgc = 0;
if ( iExpr>=0 )
iArgc = ( m_dNodes[iExpr].m_iToken==',' ) ? m_dNodes[iExpr].m_iArgs : 1;
m_sParserError.SetSprintf ( "%s() called with %d args, at least 1 args expected", sFuncName, iArgc );
return -1;
}
// do add
ExprNode_t &tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = iFunc;
tNode.m_iLeft = iExpr;
tNode.m_iRight = iLoop;
tNode.m_eArgType = ( iExpr>=0 ) ? m_dNodes[iExpr].m_eRetType : SPH_ATTR_INTEGER;
tNode.m_eRetType = g_dFuncs[iFunc].m_eRet;
// all ok
assert ( tNode.m_eRetType!=SPH_ATTR_NONE );
return m_dNodes.GetLength () - 1;
}
int ExprParser_t::AddNodeDate ( int iFunc, int iExpr1, int iExpr2, int iUnit )
{
assert ( iFunc>=0 && iFunc<int ( sizeof ( g_dFuncs ) / sizeof ( g_dFuncs[0] ) ) );
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = iFunc;
tNode.m_iLeft = iExpr1;
tNode.m_iRight = iExpr2;
tNode.m_eArgType = SPH_ATTR_INTEGER;
tNode.m_eRetType = g_dFuncs[iFunc].m_eRet;
// all ok
assert ( tNode.m_eRetType!=SPH_ATTR_NONE );
return m_dNodes.GetLength () - 1;
}
int ExprParser_t::AddNodeIn ( int iArg, int iList )
{
// assert ( g_dFuncs[FUNC_IN].m_eFunc==FUNC_IN );
// check args count
if ( iList<0 )
{
m_sParserError.SetSprintf ( "in() called with <2 args, at least 2 args expected" );
return -1;
}
// do add
ExprNode_t &tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = FUNC_IN;
tNode.m_iLeft = iArg;
tNode.m_iRight = iList;
tNode.m_eArgType = ( iArg>=0 ) ? m_dNodes[iArg].m_eRetType : SPH_ATTR_INTEGER;
tNode.m_eRetType = g_dFuncs[FUNC_IN].m_eRet;
// all ok
assert ( tNode.m_eRetType!=SPH_ATTR_NONE );
return m_dNodes.GetLength () - 1;
}
int ExprParser_t::AddNodeRemap ( int iExpr1, int iExpr2, int iList1, int iList2 )
{
//assert ( g_dFuncs[FUNC_REMAP].m_eFunc==FUNC_REMAP );
if ( m_dNodes[iExpr1].m_iToken==TOK_IDENT )
{
m_sParserError.SetSprintf ( "remap() incorrect first argument (not integer?)" );
return -1;
}
if ( m_dNodes[iExpr2].m_iToken==TOK_IDENT )
{
m_sParserError.SetSprintf ( "remap() incorrect second argument (not integer/float?)" );
return -1;
}
if ( !IsInt ( m_dNodes[iExpr1].m_eRetType ) )
{
m_sParserError.SetSprintf ( "remap() first argument should result in integer value" );
return -1;
}
ESphAttr eSecondRet = m_dNodes[iExpr2].m_eRetType;
if ( !IsNumeric ( eSecondRet ) )
{
m_sParserError.SetSprintf ( "remap() second argument should result in integer or float value" );
return -1;
}
ConstList_c &tFirstList = *m_dNodes[iList1].m_pConsts;
ConstList_c &tSecondList = *m_dNodes[iList2].m_pConsts;
if ( tFirstList.m_dInts.GetLength ()==0 )
{
m_sParserError.SetSprintf ( "remap() first constlist should consist of integer values" );
return -1;
}
if ( tFirstList.m_dInts.GetLength ()!=tSecondList.m_dInts.GetLength () &&
tFirstList.m_dInts.GetLength ()!=tSecondList.m_dFloats.GetLength () )
{
m_sParserError.SetSprintf ( "remap() both constlists should have the same length" );
return -1;
}
if ( eSecondRet==SPH_ATTR_FLOAT && tSecondList.m_dFloats.GetLength ()==0 )
{
m_sParserError.SetSprintf ( "remap() second argument results in float value and thus fourth argument should be a list of floats" );
return -1;
}
if ( eSecondRet!=SPH_ATTR_FLOAT && tSecondList.m_dInts.GetLength ()==0 )
{
m_sParserError.SetSprintf ("remap() second argument results in integer value and thus fourth argument should be a list of integers" );
return -1;
}
// do add
ExprNode_t &tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = FUNC_REMAP;
tNode.m_iLeft = iExpr1;
tNode.m_iRight = iExpr2;
tNode.m_eArgType = m_dNodes[iExpr1].m_eRetType;
tNode.m_eRetType = m_dNodes[iExpr2].m_eRetType;
return m_dNodes.GetLength () - 1;
}
// RAND() function with 0 or 1 arg
int ExprParser_t::AddNodeRand ( int iArg )
{
// assert ( g_dFuncs[FUNC_RAND].m_eFunc==FUNC_RAND );
if ( iArg>=0 )
{
if ( !IsNumeric ( m_dNodes[iArg].m_eRetType ) )
{
m_sParserError.SetSprintf ( "rand() argument must be numeric" );
return -1;
}
int iArgc = ( m_dNodes[iArg].m_iToken==',' ) ? m_dNodes[iArg].m_iArgs : 1;
if ( iArgc>1 )
{
m_sParserError.SetSprintf ( "rand() called with %d args, either 0 or 1 args expected", iArgc );
return -1;
}
}
// do add
ExprNode_t &tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = FUNC_RAND;
tNode.m_iLeft = iArg;
tNode.m_iRight = -1;
tNode.m_eArgType = ( iArg>=0 ) ? m_dNodes[iArg].m_eRetType : SPH_ATTR_INTEGER;
tNode.m_eRetType = g_dFuncs[FUNC_RAND].m_eRet;
// all ok
assert ( tNode.m_eRetType!=SPH_ATTR_NONE );
return m_dNodes.GetLength () - 1;
}
int ExprParser_t::AddNodeUdf ( int iCall, int iArg )
{
UdfCall_t * pCall = m_dUdfCalls[iCall];
SPH_UDF_INIT & tInit = pCall->m_tInit;
SPH_UDF_ARGS & tArgs = pCall->m_tArgs;
// initialize UDF right here, at AST creation stage
// just because it's easy to gather arg types here
if ( iArg>=0 )
{
// gather arg types
CSphVector<DWORD> dArgTypes;
int iCur = iArg;
while ( iCur>=0 )
{
if ( m_dNodes[iCur].m_iToken!=',' )
{
const ExprNode_t & tNode = m_dNodes[iCur];
if ( tNode.m_iToken==TOK_FUNC && ( tNode.m_iFunc==FUNC_RANKFACTORS || tNode.m_iFunc==FUNC_FACTORS ) )
pCall->m_dArgs2Free.Add ( dArgTypes.GetLength() );
if ( tNode.m_eRetType==SPH_ATTR_JSON || tNode.m_eRetType==SPH_ATTR_JSON_FIELD )
pCall->m_dArgs2Free.Add ( dArgTypes.GetLength() );
dArgTypes.Add ( tNode.m_eRetType );
break;
}
int iRight = m_dNodes[iCur].m_iRight;
if ( iRight>=0 )
{
const ExprNode_t & tNode = m_dNodes[iRight];
assert ( tNode.m_iToken!=',' );
if ( tNode.m_iToken==TOK_FUNC && ( tNode.m_iFunc==FUNC_RANKFACTORS || tNode.m_iFunc==FUNC_FACTORS) )
pCall->m_dArgs2Free.Add ( dArgTypes.GetLength() );
if ( tNode.m_eRetType==SPH_ATTR_JSON || tNode.m_eRetType==SPH_ATTR_JSON_FIELD )
pCall->m_dArgs2Free.Add ( dArgTypes.GetLength() );
dArgTypes.Add ( tNode.m_eRetType );
}
iCur = m_dNodes[iCur].m_iLeft;
}
assert ( dArgTypes.GetLength() );
tArgs.arg_count = dArgTypes.GetLength();
tArgs.arg_types = new sphinx_udf_argtype [ tArgs.arg_count ];
// we gathered internal type ids in right-to-left order
// reverse and remap
// FIXME! eliminate remap, maybe?
ARRAY_FOREACH ( i, dArgTypes )
{
sphinx_udf_argtype & eRes = tArgs.arg_types [ tArgs.arg_count-1-i ];
switch ( dArgTypes[i] )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
eRes = SPH_UDF_TYPE_UINT32;
break;
case SPH_ATTR_FLOAT:
eRes = SPH_UDF_TYPE_FLOAT;
break;
case SPH_ATTR_BIGINT:
eRes = SPH_UDF_TYPE_INT64;
break;
case SPH_ATTR_STRING:
eRes = SPH_UDF_TYPE_STRING;
break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_UINT32SET_PTR:
eRes = SPH_UDF_TYPE_UINT32SET;
break;
case SPH_ATTR_INT64SET:
case SPH_ATTR_INT64SET_PTR:
eRes = SPH_UDF_TYPE_INT64SET;
break;
case SPH_ATTR_FACTORS:
eRes = SPH_UDF_TYPE_FACTORS;
pCall->m_dArgs2Free.Add ( i );
break;
case SPH_ATTR_JSON_FIELD:
eRes = SPH_UDF_TYPE_JSON;
break;
default:
m_sParserError.SetSprintf ( "internal error: unmapped UDF argument type (arg=%d, type=%u)", i, dArgTypes[i] );
return -1;
}
}
ARRAY_FOREACH ( i, pCall->m_dArgs2Free )
pCall->m_dArgs2Free[i] = tArgs.arg_count - 1 - pCall->m_dArgs2Free[i];
}
// init
if ( pCall->m_pUdf->m_fnInit )
{
char sError [ SPH_UDF_ERROR_LEN ];
if ( pCall->m_pUdf->m_fnInit ( &tInit, &tArgs, sError ) )
{
m_sParserError = sError;
return -1;
}
}
// do add
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_UDF;
tNode.m_iFunc = iCall;
tNode.m_iLeft = iArg;
tNode.m_iRight = -1;
// deduce type
tNode.m_eArgType = ( iArg>=0 ) ? m_dNodes[iArg].m_eRetType : SPH_ATTR_INTEGER;
tNode.m_eRetType = pCall->m_pUdf->m_eRetType;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodePF ( int iFunc, int iArg )
{
assert ( iFunc>=0 && iFunc< int ( sizeof ( g_dFuncs )/sizeof ( g_dFuncs[0]) ) );
const char * sFuncName = FuncNameByHash ( iFunc );
CSphVector<ESphAttr> dRetTypes;
GatherArgRetTypes ( iArg, dRetTypes );
assert ( dRetTypes.GetLength()==0 || dRetTypes.GetLength()==1 );
if ( dRetTypes.GetLength()==1 && dRetTypes[0]!=SPH_ATTR_MAPARG )
{
m_sParserError.SetSprintf ( "%s() argument must be a map", sFuncName );
return -1;
}
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_FUNC;
tNode.m_iFunc = iFunc;
tNode.m_iLeft = iArg;
tNode.m_iRight = -1;
tNode.m_eArgType = SPH_ATTR_MAPARG;
tNode.m_eRetType = g_dFuncs[iFunc].m_eRet;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeConstlist ( int64_t iValue, bool bPackedString )
{
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_CONST_LIST;
tNode.m_pConsts = new ConstList_c();
tNode.m_pConsts->Add ( iValue );
tNode.m_pConsts->m_sExpr = m_sExpr;
tNode.m_pConsts->m_bPackedStrings = bPackedString;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeConstlist ( float iValue )
{
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_CONST_LIST;
tNode.m_pConsts = new ConstList_c();
tNode.m_pConsts->Add ( iValue );
return m_dNodes.GetLength()-1;
}
void ExprParser_t::AppendToConstlist ( int iNode, int64_t iValue )
{
m_dNodes[iNode].m_pConsts->Add ( iValue );
}
void ExprParser_t::AppendToConstlist ( int iNode, float iValue )
{
m_dNodes[iNode].m_pConsts->Add ( iValue );
}
int ExprParser_t::AddNodeUservar ( int iUservar )
{
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_USERVAR;
tNode.m_iConst = iUservar;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeHookIdent ( int iID )
{
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_HOOK_IDENT;
tNode.m_iFunc = iID;
tNode.m_eRetType = m_pHook->GetIdentType ( iID );
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeHookFunc ( int iID, int iLeft )
{
CSphVector<ESphAttr> dArgTypes;
GatherArgRetTypes ( iLeft, dArgTypes );
ESphAttr eRet = m_pHook->GetReturnType ( iID, dArgTypes, CheckForConstSet ( iLeft, 0 ), m_sParserError );
if ( eRet==SPH_ATTR_NONE )
return -1;
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_HOOK_FUNC;
tNode.m_iFunc = iID;
tNode.m_iLeft = iLeft;
tNode.m_iRight = -1;
// deduce type
tNode.m_eArgType = ( iLeft>=0 ) ? m_dNodes[iLeft].m_eRetType : SPH_ATTR_INTEGER;
tNode.m_eRetType = eRet;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeHookFunc ( int iID )
{
CSphVector<ESphAttr> dArgTypes;
ESphAttr eRet = m_pHook->GetReturnType ( iID, dArgTypes, true, m_sParserError );
if ( eRet==SPH_ATTR_NONE )
return -1;
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_HOOK_FUNC;
tNode.m_iFunc = iID;
tNode.m_eRetType = eRet;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeMapArg ( const char * szKey, const char * szValue, int64_t iValue, float fValue, VariantType_e eType )
{
ExprNode_t & tNode = m_dNodes.Add();
tNode.m_iToken = TOK_MAP_ARG;
tNode.m_pMapArg = new MapArg_c();
CSphString sValue;
if ( szKey )
{
if ( eType==VariantType_e::STRING )
{
sValue = SqlUnescape ( m_sExpr.first + GetConstStrOffset(iValue), GetConstStrLength(iValue) );
szValue = sValue.cstr();
}
tNode.m_pMapArg->Add ( szKey, szValue, iValue, fValue, eType );
}
tNode.m_eRetType = SPH_ATTR_MAPARG;
return m_dNodes.GetLength()-1;
}
void ExprParser_t::AppendToMapArg ( int iNode, const char * szKey, const char * szValue, int64_t iValue, float fValue, VariantType_e eType )
{
CSphString sValue;
if ( eType==VariantType_e::STRING )
{
sValue = SqlUnescape ( m_sExpr.first + GetConstStrOffset(iValue), GetConstStrLength(iValue) );
szValue = sValue.cstr();
}
m_dNodes[iNode].m_pMapArg->Add ( szKey, szValue, iValue, fValue, eType );
}
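/// resolve a packed attribute locator back to the attribute name in the schema;
/// the returned C string is owned by m_dIdents and freed in the parser dtor
/// (same contract as Field2Ident below)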
const char * ExprParser_t::Attr2Ident ( uint64_t uAttrLoc )
{
ExprNode_t tAttr;
sphUnpackAttrLocator ( uAttrLoc, &tAttr );
CSphString sIdent;
sIdent = m_pSchema->GetAttr ( tAttr.m_iLocator ).m_sName;
m_dIdents.Add ( sIdent.Leak() );
return m_dIdents.Last();
}
const char * ExprParser_t::Field2Ident ( uint64_t uAttrLoc )
{
CSphString sIdent;
sIdent = m_pSchema->GetField ( (int)uAttrLoc ).m_sName;
m_dIdents.Add ( sIdent.Leak() );
return m_dIdents.Last();
}
int ExprParser_t::AddNodeJsonField ( uint64_t uAttrLocator, int iLeft )
{
int iNode = AddNodeAttr ( TOK_ATTR_JSON, uAttrLocator );
m_dNodes[iNode].m_iLeft = iLeft;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeJsonSubkey ( int64_t iValue )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_SUBKEY;
tNode.m_eRetType = SPH_ATTR_STRING;
tNode.m_iConst = iValue;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeDotNumber ( int64_t iValue )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_iToken = TOK_CONST_FLOAT;
tNode.m_eRetType = SPH_ATTR_FLOAT;
const char * pCur = m_sExpr.first + (int)( iValue>>32 );
tNode.m_fConst = (float) strtod ( pCur-1, nullptr );
return m_dNodes.GetLength()-1;
}
int ExprParser_t::AddNodeIdent ( const char * sKey, int iLeft )
{
ExprNode_t & tNode = m_dNodes.Add ();
tNode.m_sIdent = sKey;
tNode.m_iLeft = iLeft;
tNode.m_iToken = TOK_IDENT;
tNode.m_eRetType = SPH_ATTR_JSON_FIELD;
return m_dNodes.GetLength()-1;
}
int ExprParser_t::ParseJoinAttr ( const char * szTable, uint64_t uOffset )
{
CSphString sAttrName;
sAttrName.SetBinary ( m_sExpr.first + GetConstStrOffset(uOffset), GetConstStrLength(uOffset) );
CSphString sAttrWithTable;
sAttrWithTable.SetSprintf ( "%s.%s", szTable, sAttrName.cstr() );
int iAttr = m_pSchema->GetAttrIndex ( sAttrWithTable.cstr() );
if ( iAttr==-1 )
m_sParserError.SetSprintf ( "unknown attribute '%s'", sAttrWithTable.cstr() );
return iAttr;
}
int ExprParser_t::AddNodeWithTable ( const char * szTable, uint64_t uOffset )
{
int iAttr = ParseJoinAttr ( szTable, uOffset );
if ( iAttr==-1 )
return -1;
YYSTYPE yylval;
int iType = ParseAttr ( iAttr, m_pSchema->GetAttr(iAttr).m_sName.cstr(), &yylval );
bool bColumnar = iType==TOK_COLUMNAR_INT || iType==TOK_COLUMNAR_TIMESTAMP || iType==TOK_COLUMNAR_FLOAT || iType==TOK_COLUMNAR_BIGINT || iType==TOK_COLUMNAR_BOOL
|| iType==TOK_COLUMNAR_STRING || iType==TOK_COLUMNAR_UINT32SET || iType==TOK_COLUMNAR_INT64SET || iType==TOK_COLUMNAR_FLOATVEC;
return bColumnar ? AddNodeColumnar ( iType, yylval.iAttrLocator ) : AddNodeAttr ( iType, yylval.iAttrLocator );
}
uint64_t ExprParser_t::ParseAttrWithTable ( const char * szTable, uint64_t uOffset )
{
int iAttr = ParseJoinAttr ( szTable, uOffset );
if ( iAttr==-1 )
return 0;
// lvalp->iAttrLocator
return sphPackAttrLocator ( m_pSchema->GetAttr(iAttr).m_tLocator, iAttr );
}
//////////////////////////////////////////////////////////////////////////
// performs simple semantic analysis
// checks operand types for some arithmetic operators
struct TypeCheck_fn
{
CSphString m_sError;
void Enter ( const ExprNode_t & tNode, const CSphVector<ExprNode_t> & dNodes )
{
if ( !m_sError.IsEmpty() )
return;
bool bNumberOp = tNode.m_iToken=='+' || tNode.m_iToken=='-' || tNode.m_iToken=='*' || tNode.m_iToken=='/';
if ( bNumberOp )
{
bool bLeftNumeric = tNode.m_iLeft<0 ? false : IsNumericNode ( dNodes[tNode.m_iLeft] );
bool bRightNumeric = tNode.m_iRight<0 ? false : IsNumericNode ( dNodes[tNode.m_iRight] );
// if json vs numeric then let it pass (for the autoconversion)
if ( ( bLeftNumeric && !bRightNumeric && dNodes[tNode.m_iRight].m_eRetType==SPH_ATTR_JSON_FIELD )
|| ( bRightNumeric && !bLeftNumeric && dNodes[tNode.m_iLeft].m_eRetType==SPH_ATTR_JSON_FIELD ) )
return;
if ( !bLeftNumeric || !bRightNumeric )
{
m_sError = "numeric operation applied to non-numeric operands";
return;
}
}
if ( tNode.m_iToken==TOK_EQ )
{
// string equal must work with string columns only
ESphAttr eLeftRet = tNode.m_iLeft<0 ? SPH_ATTR_NONE : dNodes[tNode.m_iLeft].m_eRetType;
ESphAttr eRightRet = tNode.m_iRight<0 ? SPH_ATTR_NONE : dNodes[tNode.m_iRight].m_eRetType;
bool bLeftStr = ( eLeftRet==SPH_ATTR_STRING || eLeftRet==SPH_ATTR_STRINGPTR || eLeftRet==SPH_ATTR_JSON_FIELD );
bool bRightStr = ( eRightRet==SPH_ATTR_STRING || eRightRet==SPH_ATTR_STRINGPTR || eRightRet==SPH_ATTR_JSON_FIELD );
if ( bLeftStr!=bRightStr && eLeftRet!=SPH_ATTR_JSON_FIELD && eRightRet!=SPH_ATTR_JSON_FIELD )
{
m_sError = "equal operation applied to part string operands";
return;
}
}
}
void Exit ( const ExprNode_t & )
{}
bool IsNumericNode ( const ExprNode_t & tNode )
{
return tNode.m_eRetType==SPH_ATTR_INTEGER || tNode.m_eRetType==SPH_ATTR_BOOL || tNode.m_eRetType==SPH_ATTR_FLOAT ||
tNode.m_eRetType==SPH_ATTR_BIGINT || tNode.m_eRetType==SPH_ATTR_TOKENCOUNT || tNode.m_eRetType==SPH_ATTR_TIMESTAMP;
}
};
// checks whether we have a WEIGHT() in expression
struct WeightCheck_fn
{
bool * m_pRes;
explicit WeightCheck_fn ( bool * pRes )
: m_pRes ( pRes )
{
assert ( m_pRes );
*m_pRes = false;
}
void Enter ( const ExprNode_t & tNode, const CSphVector<ExprNode_t> & )
{
if ( tNode.m_iToken==TOK_WEIGHT )
*m_pRes = true;
}
void Exit ( const ExprNode_t & )
{}
};
// checks whether the expression uses functions defined outside this file,
// e.g. searchd-level or ranker-level functions
struct HookCheck_fn
{
ISphExprHook * m_pHook;
explicit HookCheck_fn ( ISphExprHook * pHook )
: m_pHook ( pHook )
{}
void Enter ( const ExprNode_t & tNode, const CSphVector<ExprNode_t> & )
{
if ( tNode.m_iToken==TOK_HOOK_IDENT || tNode.m_iToken==TOK_HOOK_FUNC )
m_pHook->CheckEnter ( tNode.m_iFunc );
}
void Exit ( const ExprNode_t & tNode )
{
if ( tNode.m_iToken==TOK_HOOK_IDENT || tNode.m_iToken==TOK_HOOK_FUNC )
m_pHook->CheckExit ( tNode.m_iFunc );
}
};
static int EXPR_STACK_EVAL = 160;
static int EXPR_STACK_CREATE = 400;
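/// raise (never lower) the per-node stack cost estimates used to decide whether
/// creation/evaluation of a deep expression tree must move to a bigger coroutine stack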
void SetExprNodeStackItemSize ( int iCreateSize, int iEvalSize )
{
if ( iCreateSize>EXPR_STACK_CREATE )
EXPR_STACK_CREATE = iCreateSize;
if ( iEvalSize>EXPR_STACK_EVAL )
EXPR_STACK_EVAL = iEvalSize;
}
ISphExpr * ExprParser_t::Parse ( const char * sExpr, const ISphSchema & tSchema, const CSphString * pJoinIdx, ESphAttr * pAttrType, bool * pUsesWeight, CSphString & sError )
{
const char* szExpr = sExpr;
// fixme! provide shared access to semi-parsed items.
CSphString sCopy ( sExpr ); szExpr = sCopy.cstr();
m_sLexerError = ""; //lexer
m_sParserError = "";
m_sCreateError = "";
// setup lexer
m_sExpr = { szExpr, (int)strlen (szExpr) };
m_pSchema = &tSchema;
m_pJoinIdx = pJoinIdx;
// setup constant functions
m_iConstNow = (int) time ( nullptr );
// build abstract syntax tree
m_iParsed = -1;
// alternative parser
yy1lex_init ( &m_pScanner );
char * sEnd = const_cast<char *>( szExpr+m_sExpr.second );
char cMemLast = sEnd[1];
if ( cMemLast )
sEnd[1] = 0; // this is ok because string allocates a small gap
YY_BUFFER_STATE tLexerBuffer = yy1_scan_buffer ( const_cast<char*>(szExpr), m_sExpr.second+2, m_pScanner );
yyparse ( this );
yy1_delete_buffer ( tLexerBuffer, m_pScanner );
yy1lex_destroy ( m_pScanner );
if ( cMemLast )
sEnd[1] = cMemLast; // this is ok because string allocates a small gap
// handle errors
if ( m_iParsed<0 || !m_sLexerError.IsEmpty() || !m_sParserError.IsEmpty() )
{
sError = !m_sLexerError.IsEmpty() ? m_sLexerError : m_sParserError;
if ( sError.IsEmpty() ) sError = "general parsing error";
return nullptr;
}
// deduce return type
ESphAttr eAttrType = m_dNodes[m_iParsed].m_eRetType;
// pooled MVA/string attributes are ok to use in expressions, but storing them into schema requires their _PTR counterparts
if ( eAttrType==SPH_ATTR_UINT32SET || eAttrType==SPH_ATTR_INT64SET || eAttrType==SPH_ATTR_STRING )
eAttrType = sphPlainAttrToPtrAttr(eAttrType);
// Check expression stack to fit for mutual recursive function calls.
// This check is an approximation, because different compilers with
// different settings produce code which requires different stack size.
const int TREE_SIZE_THRESH = 20;
const StackSizeTuplet_t tExprStack = { EXPR_STACK_CREATE, EXPR_STACK_EVAL };
int iStackNeeded = -1;
if ( !EvalStackForTree ( m_dNodes, m_iParsed, tExprStack, TREE_SIZE_THRESH, iStackNeeded, "expressions", sError ) )
return nullptr;
ISphExpr * pExpr = nullptr;
Threads::Coro::Continue ( iStackNeeded, [&] {
pExpr = Create ( pUsesWeight, sError );
if ( pAttrType )
*pAttrType = eAttrType;
} );
if ( pExpr && iStackNeeded>0 )
{
auto pChildExpr = pExpr;
pExpr = new Expr_ProxyFat_c ( pChildExpr );
pChildExpr->Release();
}
return pExpr;
}
#ifndef NDEBUG
static void CheckDescendingNodes ( const CSphVector<ExprNode_t> & dNodes )
{
ARRAY_CONSTFOREACH( i, dNodes )
{
assert ( i>dNodes[i].m_iLeft );
assert ( i>dNodes[i].m_iRight );
}
}
#endif
ISphExpr * ExprParser_t::Create ( bool * pUsesWeight, CSphString & sError )
{
if ( GetError () )
return nullptr;
#ifndef NDEBUG
CheckDescendingNodes ( m_dNodes );
#endif
// perform optimizations (tree transformations)
Optimize ( m_iParsed );
// fixme! canonize pass breaks the descending-nodes constraint on "1+2+3*aaa"
#ifndef NDEBUG
// CheckDescendingNodes ( m_dNodes );
#endif
// simple semantic analysis
TypeCheck_fn tTypeChecker;
WalkTree ( m_iParsed, tTypeChecker );
if ( !tTypeChecker.m_sError.IsEmpty() )
{
sError.Swap ( tTypeChecker.m_sError );
return nullptr;
}
// create evaluator
CSphRefcountedPtr<ISphExpr> pRes { CreateTree ( m_iParsed ) };
if ( !m_sCreateError.IsEmpty() )
{
pRes = nullptr;
sError = m_sCreateError;
}
else if ( !pRes )
{
sError.SetSprintf ( "empty expression" );
}
if ( pUsesWeight )
{
WeightCheck_fn tWeightFunctor ( pUsesWeight );
WalkTree ( m_iParsed, tWeightFunctor );
}
if ( m_pHook )
{
HookCheck_fn tHookFunctor ( m_pHook );
WalkTree ( m_iParsed, tHookFunctor );
}
return pRes.Leak();
}
//////////////////////////////////////////////////////////////////////////
// PUBLIC STUFF
//////////////////////////////////////////////////////////////////////////
JoinArgs_t::JoinArgs_t ( const ISphSchema & tJoinedSchema, const CSphString & sIndex1, const CSphString & sIndex2 )
: m_tJoinedSchema ( tJoinedSchema )
, m_sIndex1 ( sIndex1 )
, m_sIndex2 ( sIndex2 )
{}
/// parser entry point
ISphExpr * sphExprParse ( const char * szExpr, const ISphSchema & tSchema, const CSphString * pJoinIdx, CSphString & sError, ExprParseArgs_t & tArgs )
{
// parse into opcodes
ExprParser_t tParser ( tArgs.m_pHook, tArgs.m_pProfiler, tArgs.m_eCollation );
ISphExpr * pRes = tParser.Parse ( szExpr, tSchema, pJoinIdx, tArgs.m_pAttrType, tArgs.m_pUsesWeight, sError );
if ( tArgs.m_pZonespanlist )
*tArgs.m_pZonespanlist = tParser.m_bHasZonespanlist;
if ( tArgs.m_pEvalStage )
*tArgs.m_pEvalStage = tParser.m_eEvalStage;
if ( tArgs.m_pPackedFactorsFlags )
*tArgs.m_pPackedFactorsFlags = tParser.m_uPackedFactorFlags;
if ( tArgs.m_pStoredField )
*tArgs.m_pStoredField = tParser.m_uStoredField;
if ( tArgs.m_pNeedDocIds )
*tArgs.m_pNeedDocIds = tParser.m_bNeedDocIds;
return pRes;
}
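// A minimal usage sketch, assuming a schema with a float 'price' attribute and
// a match row bound to it (error handling trimmed; Eval() is the usual
// evaluation entry point):
#if 0
CSphString sError;
ExprParseArgs_t tArgs;
CSphRefcountedPtr<ISphExpr> pExpr { sphExprParse ( "price*1.2", tSchema, nullptr, sError, tArgs ) };
if ( !pExpr )
sphWarning ( "parse failed: %s", sError.cstr() );
else
{
float fValue = pExpr->Eval ( tMatch ); // tMatch is a CSphMatch over tSchema
}
#endif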
/// json type autoconversion
ISphExpr * sphJsonFieldConv ( ISphExpr * pExpr )
{
return new Expr_JsonFieldConv_c ( pExpr );
}
void FetchAttrDependencies ( StrVec_t & dAttrNames, const ISphSchema & tSchema )
{
for ( const auto & i : dAttrNames )
{
const CSphColumnInfo * pAttr = tSchema.GetAttr ( i.cstr() );
if ( !pAttr || !pAttr->m_pExpr )
continue;
int iOldLen = dAttrNames.GetLength();
pAttr->m_pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dAttrNames );
for ( int iNewAttr = iOldLen; iNewAttr < dAttrNames.GetLength(); iNewAttr++ )
if ( dAttrNames[iNewAttr]==i )
dAttrNames.Remove(iNewAttr);
}
dAttrNames.Uniq();
}
size: 312,854 | language: C++ | extension: .cpp | total_lines: 9,105 | avg_line_length: 31.556288 | max_line_length: 343 | alphanum_fraction: 0.67014 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact & near; redpajama, githubcode, stackv1, stackv2): all false

id: 16,827 | file_name: netfetch.cpp | file_path: manticoresoftware_manticoresearch/src/netfetch.cpp
//
// Copyright (c) 2022-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "netfetch.h"
#include "sphinxutils.h"
#include "coroutine.h"
#include "networking_daemon.h"
#define LOG_LEVEL_MULTIINFO false
#define LOG_LEVEL_TIMERCB ( LOG_LEVEL_MULTIINFO && false )
#define LOG_LEVEL_CURLSOCKET false
#define LOG_LEVEL_CURLEASY false
#define LOG_LEVEL_CB false
#define CURL_VERBOSE false
#define CURL_VERBOSE_CB ( CURL_VERBOSE && false )
#define CURL_PROGRESS false
/// usually DEBUG, but can be WARNING for easier bug investigations
#define LDEBUG DEBUG
/// logging helpers
#define LOG_COMPONENT_FUNCLINE __func__ << " @ " << __LINE__ << " "
#define LOGWARN( Level, Component ) LOGMSG ( WARNING, Level, Component )
#define STATIC_INFO LONGINFO ( CB, FUNCLINE )
// used in CurlMulti functions
#define LOG_COMPONENT_CURLMULTI "CurlMulti_c::" << LOG_COMPONENT_FUNCLINE
#define MULTI_LOG( LEVEL ) LOGMSG ( LEVEL, MULTIINFO, CURLMULTI )
#define MULTI_INFO MULTI_LOG ( INFO )
#define MULTI_WARN MULTI_LOG ( WARNING )
#define MULTI_DEBUG MULTI_LOG ( LDEBUG )
// used in CurlEasy functions
#define LOG_COMPONENT_CONN "CurlConn_t::" << LOG_COMPONENT_FUNCLINE << this << "(" << m_pCurlEasy << ") " << "(" << m_sUrl << ") "
#define CONN_LOG( LEVEL ) LOGMSG ( LEVEL, CURLEASY, CONN )
#define CONN_INFO CONN_LOG ( INFO )
#define CONN_WARN CONN_LOG ( WARNING )
#define CONN_DEBUG CONN_LOG ( LDEBUG )
// used in socket functions
#define LOG_COMPONENT_SOCKET "CurlSocket_t::" << LOG_COMPONENT_FUNCLINE << this << " (refs " << GetRefcount() << ", sock " << m_iSock << ") "
#define SOCKET_LOG( LEVEL ) LOGMSG ( LEVEL, CURLSOCKET, SOCKET )
#define SOCKET_INFO SOCKET_LOG ( INFO )
#define SOCKET_WARN SOCKET_LOG ( WARNING )
#define SOCKET_DEBUG SOCKET_LOG ( LDEBUG )
// used in timer cb
#define LOG_COMPONENT_TIMER LOG_COMPONENT_CURLMULTI << STime() << " "
#define TIMER_LOG( LEVEL ) LOGMSG ( LEVEL, TIMERCB, TIMER )
#define TIMER_INFO TIMER_LOG ( INFO )
#define TIMER_WARNING TIMER_LOG ( WARNING )
#define TIMER_DEBUG TIMER_LOG ( LDEBUG )
// used in static callbacks; one param must be named 'handle'
#define LOG_COMPONENT_CURLCB LOG_COMPONENT_FUNCLINE << handle << " "
#define CURLCB_LOG( LEVEL ) LOGMSG ( LEVEL, CB, CURLCB )
#define CB_INFO CURLCB_LOG ( INFO )
#define CB_WARN CURLCB_LOG ( WARNING )
#define CB_DEBUG CURLCB_LOG ( LDEBUG )
#if WITH_CURL
#include "datetime.h"
#include <curl/curl.h>
#if DL_CURL
static decltype ( &curl_global_init ) sph_curl_global_init = nullptr;
static decltype ( &curl_global_cleanup ) sph_curl_global_cleanup = nullptr;
static decltype ( &curl_multi_init ) sph_curl_multi_init = nullptr;
static decltype ( &curl_multi_cleanup ) sph_curl_multi_cleanup = nullptr;
static decltype ( &curl_multi_add_handle ) sph_curl_multi_add_handle = nullptr;
static decltype ( &curl_multi_remove_handle ) sph_curl_multi_remove_handle = nullptr;
static decltype ( &curl_multi_setopt ) sph_curl_multi_setopt = nullptr;
static decltype ( &curl_multi_socket_action ) sph_curl_multi_socket_action = nullptr;
static decltype ( &curl_multi_info_read ) sph_curl_multi_info_read = nullptr;
static decltype ( &curl_multi_assign ) sph_curl_multi_assign = nullptr;
static decltype ( &curl_easy_init ) sph_curl_easy_init = nullptr;
static decltype ( &curl_easy_cleanup ) sph_curl_easy_cleanup = nullptr;
static decltype ( &curl_easy_setopt ) sph_curl_easy_setopt = nullptr;
static decltype ( &curl_easy_getinfo ) sph_curl_easy_getinfo = nullptr;
static decltype ( &curl_slist_append ) sph_curl_slist_append = nullptr;
static decltype ( &curl_slist_free_all ) sph_curl_slist_free_all = nullptr;
static bool InitDynamicCurl()
{
const char* sFuncs[] = {
"curl_global_init",
"curl_global_cleanup",
"curl_multi_init",
"curl_multi_cleanup",
"curl_multi_add_handle",
"curl_multi_remove_handle",
"curl_multi_setopt",
"curl_multi_socket_action",
"curl_multi_info_read",
"curl_multi_assign",
"curl_easy_init",
"curl_easy_cleanup",
"curl_easy_setopt",
"curl_easy_getinfo",
"curl_slist_append",
"curl_slist_free_all",
};
void** pFuncs[] = {
(void**)&sph_curl_global_init,
(void**)&sph_curl_global_cleanup,
(void**)&sph_curl_multi_init,
(void**)&sph_curl_multi_cleanup,
(void**)&sph_curl_multi_add_handle,
(void**)&sph_curl_multi_remove_handle,
(void**)&sph_curl_multi_setopt,
(void**)&sph_curl_multi_socket_action,
(void**)&sph_curl_multi_info_read,
(void**)&sph_curl_multi_assign,
(void**)&sph_curl_easy_init,
(void**)&sph_curl_easy_cleanup,
(void**)&sph_curl_easy_setopt,
(void**)&sph_curl_easy_getinfo,
(void**)&sph_curl_slist_append,
(void**)&sph_curl_slist_free_all,
};
static CSphDynamicLibrary dLib ( CURL_LIB );
return dLib.LoadSymbols ( sFuncs, pFuncs, sizeof ( pFuncs ) / sizeof ( void** ) );
}
#else
#define sph_curl_global_init curl_global_init
#define sph_curl_global_cleanup curl_global_cleanup
#define sph_curl_multi_init curl_multi_init
#define sph_curl_multi_cleanup curl_multi_cleanup
#define sph_curl_multi_add_handle curl_multi_add_handle
#define sph_curl_multi_remove_handle curl_multi_remove_handle
#define sph_curl_multi_setopt curl_multi_setopt
#define sph_curl_multi_socket_action curl_multi_socket_action
#define sph_curl_multi_info_read curl_multi_info_read
#define sph_curl_multi_assign curl_multi_assign
#define sph_curl_easy_init curl_easy_init
#define sph_curl_easy_cleanup curl_easy_cleanup
#define sph_curl_easy_setopt curl_easy_setopt
#define sph_curl_easy_getinfo curl_easy_getinfo
#define sph_curl_slist_append curl_slist_append
#define sph_curl_slist_free_all curl_slist_free_all
#define InitDynamicCurl() ( true )
#endif
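// Both branches above resolve to the same sph_curl_* names: either bound at
// runtime through the project's CSphDynamicLibrary, or aliased straight to the
// linked libcurl. A minimal POSIX sketch of the same runtime-binding idea
// (illustrative only; the soname is an assumption, not what the project
// necessarily loads):
#if 0
#include <dlfcn.h>
void * pLib = dlopen ( "libcurl.so.4", RTLD_LAZY | RTLD_LOCAL );
if ( pLib )
sph_curl_easy_init = (decltype ( &curl_easy_init )) dlsym ( pLib, "curl_easy_init" );
#endif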
static CSphString STime (int64_t iNow=-1)
{
if (iNow<0)
iNow = sphMicroTimer();
time_t ts = (time_t)( iNow / 1000000 ); // on some systems (eg. FreeBSD 6.2), tv.tv_sec has another type, and we can't just pass it
cctz::civil_second tCS = ConvertTimeUTC(ts);
StringBuilder_c tOut;
tOut << Digits<2> ( tCS.hour() ) << ':' << Digits<2> ( tCS.minute() ) << ':' << Digits<2> ( tCS.second() ) << '.' << FixedNum<10, 3, 0, '0'> ( ( iNow % 1000000 ) / 1000 );
CSphString sRes;
tOut.MoveTo ( sRes );
return sRes;
}
// helper used for logging
inline static const char* CurlPollName ( int iWhat )
{
static const char* WhatStrs[] = { "NONE", "IN", "OUT", "INOUT", "REMOVE" };
return ( iWhat>=0 && iWhat<(int)( sizeof(WhatStrs)/sizeof(WhatStrs[0]) ) ) ? WhatStrs[iWhat] : "UNKNOWN"; // compare against element count, not byte size
}
// forward def
class CurlSocket_c;
// Common curl for all connections
class CurlMulti_c
{
private:
CURLM* m_pCurlMulti;
Threads::RoledSchedulerSharedPtr_t m_tStrand;
MiniTimer_c m_tTimer;
static bool m_bInitialized;
CurlSocket_c* MakeAsyncSocket ( curl_socket_t tCurlSocket ) const;
int CmSocketCb ( curl_socket_t tCurlSocket, int iWhat, void* pSocketData ) const REQUIRES ( CurlStrand() );
void StrandSocketAction ( curl_socket_t tCurlSocket, int iWhat ) const
{
Threads::Coro::Go ( [this, tCurlSocket, iWhat]() REQUIRES ( CurlStrand() ) {
SocketAction ( tCurlSocket, iWhat );
}, CurlStrand() );
}
void CheckTimeouts() const
{
TIMER_DEBUG;
StrandSocketAction ( CURL_SOCKET_TIMEOUT, 0 );
}
int CmTimerCb ( long iTimeoutMS ) REQUIRES ( CurlStrand() )
{
if ( iTimeoutMS < 0 )
{
m_tTimer.UnEngage();
TIMER_INFO << "unengage";
}
else if ( iTimeoutMS == 0 )
{
TIMER_DEBUG << "immediate check";
StrandSocketAction ( CURL_SOCKET_TIMEOUT, 0 );
} else
{
auto iEngaged = m_tTimer.Engage ( iTimeoutMS == 1 ? 2 : iTimeoutMS ); // add 1MS jitter
TIMER_DEBUG << "Engage for " << iTimeoutMS << " (" << STime(iEngaged) << ")";
}
return 0;
}
static int CmSocketCbJump ( CURL*, curl_socket_t tCurlSocket, int iWhat, void* pUserData, void* pSocketData ) NO_THREAD_SAFETY_ANALYSIS
{
auto pThis = (CurlMulti_c*)pUserData;
return pThis->CmSocketCb ( tCurlSocket, iWhat, pSocketData );
}
static int CmTimerCbJump ( CURLM* pcurl, long iTimeoutMS, void* pUserData ) NO_THREAD_SAFETY_ANALYSIS
{
auto pThis = (CurlMulti_c*)pUserData;
assert ( pThis->m_pCurlMulti == pcurl );
return pThis->CmTimerCb ( iTimeoutMS );
}
public:
CurlMulti_c()
: m_tTimer { "CurlMulti", [this] { CheckTimeouts(); } }
{
sph_curl_global_init ( CURL_GLOBAL_DEFAULT );
m_pCurlMulti = sph_curl_multi_init();
// socket cb
sph_curl_multi_setopt ( m_pCurlMulti, CURLMOPT_SOCKETFUNCTION, &CmSocketCbJump );
sph_curl_multi_setopt ( m_pCurlMulti, CURLMOPT_SOCKETDATA, this );
// timer cb
sph_curl_multi_setopt ( m_pCurlMulti, CURLMOPT_TIMERFUNCTION, &CmTimerCbJump );
sph_curl_multi_setopt ( m_pCurlMulti, CURLMOPT_TIMERDATA, this );
m_tStrand = MakeAloneScheduler ( GlobalWorkPool(), "curl_serial" );
m_bInitialized = true;
MULTI_INFO;
}
~CurlMulti_c()
{
Deinit();
}
void WriteSocketCookie ( curl_socket_t tCurlSocket, void* pCookie ) const REQUIRES ( CurlStrand() )
{
auto x = sph_curl_multi_assign ( m_pCurlMulti, tCurlSocket, pCookie );
MULTI_INFO << "curl_multi_assign for socket " << tCurlSocket << ", cookie " << pCookie << " returned " << x;
}
void SocketAction ( curl_socket_t tCurlSocket, int iWhat ) const REQUIRES ( CurlStrand() );
Threads::SchedRole CurlStrand() const RETURN_CAPABILITY ( m_tStrand )
{
return m_tStrand;
}
CURLM* GetMultiPtr() const
{
return m_pCurlMulti;
}
inline static bool IsInitialized()
{
return m_bInitialized;
}
void Deinit()
{
MULTI_INFO;
if ( !m_bInitialized )
return;
sph_curl_multi_cleanup ( m_pCurlMulti );
sph_curl_global_cleanup();
m_bInitialized = false;
MULTI_INFO;
}
};
bool CurlMulti_c::m_bInitialized = false;
CurlMulti_c& CurlMulti()
{
static CurlMulti_c tCurlMulti;
return tCurlMulti;
}
Threads::SchedRole CurlStrand() RETURN_CAPABILITY ( CurlMulti().CurlStrand() )
{
return CurlMulti().CurlStrand();
}
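// All curl_multi state is confined to this single "strand" scheduler; code on
// arbitrary threads hops onto it instead of taking a lock. A minimal
// fire-and-forget sketch, mirroring StrandSocketAction() above:
#if 0
Threads::Coro::Go ( []() REQUIRES ( CurlStrand() ) {
// safe to touch CurlMulti() internals here: the strand serializes execution
}, CurlStrand() );
#endif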
// wrapper over a curl socket.
// Note that curl manages connections itself; we don't open/accept anything here, we just provide async io.
class CurlSocket_c final: public ISphNetAction
{
const CurlMulti_c* m_pCurlMulti;
CSphRefcountedPtr<CSphNetLoop> m_pNetLoop;
int m_iNotifiedCurlEvents = CURL_POLL_NONE;
int m_iLastEngaged = CURL_POLL_NONE;
protected:
~CurlSocket_c() final
{
SOCKET_INFO;
}
public:
CurlSocket_c ( int iSock, const CurlMulti_c* pOwner )
: ISphNetAction ( iSock )
, m_pCurlMulti ( pOwner )
{
assert ( m_pCurlMulti );
auto pNetLoop = GetAvailableNetLoop();
SafeAddRef ( pNetLoop );
m_pNetLoop = pNetLoop;
SOCKET_INFO;
}
void Process() final
{
int iCurlEvents = CURL_POLL_NONE;
if ( CheckSocketError() || m_uGotEvents == IS_TIMEOUT ) // real socket error
{
iCurlEvents = CURL_POLL_REMOVE;
} else
{
bool bRead = m_uGotEvents & NetPollEvent_t::IS_READ;
bool bWrite = m_uGotEvents & NetPollEvent_t::IS_WRITE;
if ( bRead && bWrite )
iCurlEvents = CURL_POLL_INOUT;
else if ( bRead )
iCurlEvents = CURL_POLL_IN;
else if ( bWrite )
iCurlEvents = CURL_POLL_OUT;
else
iCurlEvents = CURL_POLL_NONE;
}
NotifyCurl ( iCurlEvents );
}
void NetLoopDestroying() final
{
Release();
}
public:
void Engage ( int iCurlWhat, bool bExternal = true ) REQUIRES ( m_pCurlMulti->CurlStrand() )
{
SOCKET_DEBUG << ( bExternal ? "external" : "internal" ) << " -> " << CurlPollName ( iCurlWhat );
m_iLastEngaged = iCurlWhat;
m_uIOChange = NetPollEvent_t::SET_ONESHOT;
switch ( iCurlWhat )
{
case CURL_POLL_IN: m_uIOChange |= NetPollEvent_t::SET_READ; break;
case CURL_POLL_INOUT: m_uIOChange |= NetPollEvent_t::SET_RW; break;
case CURL_POLL_OUT: m_uIOChange |= NetPollEvent_t::SET_WRITE; break;
case CURL_POLL_REMOVE: return Remove();
default: break;
}
m_pNetLoop->AddAction ( this );
}
private:
void NotifyCurl ( int iCurlEvents )
{
if ( m_iNotifiedCurlEvents == iCurlEvents )
return;
SOCKET_DEBUG << "got events " << m_uGotEvents << ", " << CurlPollName ( iCurlEvents );
m_iNotifiedCurlEvents = iCurlEvents;
AddRef();
Threads::Coro::Go ( [this, iCurlEvents]() REQUIRES ( m_pCurlMulti->CurlStrand() ) {
CSphRefcountedPtr<ISphNetAction> pWorkKeeper { this };
m_pCurlMulti->SocketAction ( m_iSock, iCurlEvents );
CurlNotified();
},
m_pCurlMulti->CurlStrand() );
}
void CurlNotified() REQUIRES ( m_pCurlMulti->CurlStrand() )
{
SOCKET_DEBUG;
m_iNotifiedCurlEvents = CURL_POLL_NONE;
if ( m_iLastEngaged != CURL_POLL_REMOVE )
Engage ( m_iLastEngaged, false );
}
void Remove() REQUIRES ( m_pCurlMulti->CurlStrand() )
{
SOCKET_INFO;
m_iLastEngaged = CURL_POLL_REMOVE;
m_uIOChange = NetPollEvent_t::SET_NONE;
m_pNetLoop->AddAction ( this );
m_pCurlMulti->WriteSocketCookie ( m_iSock, nullptr );
Release();
}
};
#ifdef CURLOPT_XFERINFOFUNCTION
static int ProgressCb ( void* handle, curl_off_t dltotal, curl_off_t dlnow, curl_off_t ultotal, curl_off_t ulnow )
{
auto* pConn = (CurlConn_t*)handle;
CB_DEBUG << ": " << pConn->m_sUrl
<< " d (" << dlnow << "/" << dltotal << ")"
<< " u (" << ulnow << "/" << ultotal << ")";
return 0;
}
#endif
#if CURL_VERBOSE_CB
inline static const char* CurlInfoType ( int iType )
{
const char* InfotypeStrs[] = { "CURLINFO_TEXT", "CURLINFO_HEADER_IN", "CURLINFO_HEADER_OUT", "CURLINFO_DATA_IN", "CURLINFO_DATA_OUT", "CURLINFO_SSL_DATA_IN", "CURLINFO_SSL_DATA_OUT", "CURLINFO_END" };
return ( iType>=0 && iType<(int)( sizeof(InfotypeStrs)/sizeof(InfotypeStrs[0]) ) ) ? InfotypeStrs[iType] : "UNKNOWN"; // compare against element count, not byte size
}
static int DebugCbJump ( CURL* handle, curl_infotype type, char* data, size_t size, void* pUserData )
{
CB_INFO << "chunk " << CurlInfoType ( type ) << " with " << size << " bytes.";
return 0;
}
#endif
using CurlConnListHook_t = boost::intrusive::slist_member_hook<>;
struct CurlConn_t
{
CURL* m_pCurlEasy;
CurlMulti_c* m_pCurlMulti = nullptr;
CSphString m_sUrl;
char m_sError[CURL_ERROR_SIZE];
CURLcode m_uReturnCode = CURLE_OK;
CSphVector<BYTE> m_dData;
Threads::Coro::Waker_c m_tWaker;
CurlConnListHook_t m_tHook;
int WriteCb ( ByteBlob_t dData )
{
CONN_DEBUG << "received " << dData.second << " bytes";
m_dData.Append(dData);
return dData.second;
}
static size_t WriteCbJump ( void* pData, size_t tSize, size_t nItems, void* pUserData )
{
assert ( tSize == 1 );
auto pConn = (CurlConn_t*)pUserData;
return pConn->WriteCb ( { (const BYTE*)pData, tSize * nItems } );
}
template<typename TPARAM>
inline void SetCurlOpt ( CURLoption eOpt, TPARAM tParam ) const
{
sph_curl_easy_setopt ( m_pCurlEasy, eOpt, tParam );
}
CurlConn_t ( CSphString sUrl, Threads::Coro::Worker_c* pWorker ) REQUIRES ( CurlStrand() )
: m_sUrl { std::move(sUrl) }
, m_tWaker { Threads::CreateWaker ( pWorker ) }
{
m_pCurlEasy = sph_curl_easy_init();
// basic options from customer
SetCurlOpt ( CURLOPT_URL, m_sUrl.cstr() );
SetCurlOpt ( CURLOPT_FOLLOWLOCATION, 1 );
// extra service options
SetCurlOpt ( CURLOPT_NOSIGNAL, 1 );
// options we absolutely NEED
SetCurlOpt ( CURLOPT_WRITEFUNCTION, WriteCbJump );
SetCurlOpt ( CURLOPT_WRITEDATA, this );
SetCurlOpt ( CURLOPT_ERRORBUFFER, m_sError );
SetCurlOpt ( CURLOPT_PRIVATE, this ); // to use via CURLINFO_PRIVATE from curl_easy_getinfo
#if CURL_VERBOSE
SetCurlOpt ( CURLOPT_VERBOSE, 1 );
#if CURL_VERBOSE_CB
SetCurlOpt ( CURLOPT_DEBUGFUNCTION, DebugCbJump );
SetCurlOpt ( CURLOPT_DEBUGDATA, this );
#endif
#endif
#if CURL_PROGRESS
SetCurlOpt ( CURLOPT_NOPROGRESS, 0 );
#ifdef CURLOPT_XFERINFOFUNCTION
SetCurlOpt ( CURLOPT_XFERINFOFUNCTION, ProgressCb );
#endif
#ifdef CURLOPT_XFERINFODATA
SetCurlOpt ( CURLOPT_XFERINFODATA, this );
#endif
#endif
}
void RunQuery() REQUIRES ( CurlStrand() );
void Done ( CURLcode uResult ) REQUIRES ( CurlStrand() );
~CurlConn_t()
{
CONN_INFO;
sph_curl_easy_cleanup ( m_pCurlEasy );
}
};
static bool operator== ( const CurlConn_t& tA, const CurlConn_t& tB ) noexcept
{
return tA.m_pCurlEasy == tB.m_pCurlEasy;
}
using CurlConnList_t = boost::intrusive::slist<CurlConn_t,
boost::intrusive::member_hook<CurlConn_t, CurlConnListHook_t, &CurlConn_t::m_tHook>,
boost::intrusive::constant_time_size<false>,
boost::intrusive::cache_last<false>>;
CurlConnList_t g_tCurlConnections GUARDED_BY ( CurlStrand() );
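// The list above is intrusive: the links live inside CurlConn_t itself
// (m_tHook), so insertion and removal never allocate. A minimal self-contained
// sketch of the same member-hook pattern with generic types:
#if 0
struct Node_t
{
int m_iValue = 0;
boost::intrusive::slist_member_hook<> m_tHook;
};
using NodeList_t = boost::intrusive::slist<Node_t,
boost::intrusive::member_hook<Node_t, boost::intrusive::slist_member_hook<>, &Node_t::m_tHook>>;
Node_t tNode;
NodeList_t tList;
tList.push_front ( tNode ); // tNode must outlive its membership in tList
#endif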
void CurlConn_t::RunQuery() REQUIRES ( CurlStrand() )
{
m_pCurlMulti = &CurlMulti();
CONN_INFO << "Add to multi " << m_pCurlMulti << " for " << m_sUrl;
g_tCurlConnections.push_front ( *this );
auto iRes = sph_curl_multi_add_handle ( m_pCurlMulti->GetMultiPtr(), m_pCurlEasy );
CONN_INFO << "Add complete -> " << iRes;
}
void CurlConn_t::Done ( CURLcode uResult ) REQUIRES ( CurlStrand() )
{
g_tCurlConnections.remove ( *this ); // this op requires operator==
assert ( m_pCurlMulti );
m_uReturnCode = uResult;
CONN_INFO << "DONE: (" << uResult << ") " << m_sError;
sph_curl_multi_remove_handle ( m_pCurlMulti->GetMultiPtr(), m_pCurlEasy );
m_pCurlMulti = nullptr;
m_tWaker.Wake ( true );
}
static void CurlConnectionIsDone ( CURLMsg* pMsg ) REQUIRES ( CurlStrand() )
{
assert ( pMsg->msg == CURLMSG_DONE );
auto pEasy = pMsg->easy_handle;
CurlConn_t* pConn;
sph_curl_easy_getinfo ( pEasy, CURLINFO_PRIVATE, &pConn );
pConn->Done ( pMsg->data.result );
}
void CurlMulti_c::SocketAction ( curl_socket_t tCurlSocket, int iWhat ) const REQUIRES ( CurlStrand() ) NO_THREAD_SAFETY_ANALYSIS
{
MULTI_DEBUG << "invoke curl_multi_socket_action with socket " << tCurlSocket << ", " << CurlPollName(iWhat);
int iLeft = 0;
auto x = sph_curl_multi_socket_action ( m_pCurlMulti, tCurlSocket, iWhat, &iLeft );
MULTI_DEBUG << "curl_multi_socket_action returned " << x << ", " << iLeft << " tasks running.";
while ( true )
{
int iLeftInQueue;
auto pMsg = sph_curl_multi_info_read ( m_pCurlMulti, &iLeftInQueue );
if ( !pMsg )
break;
if ( pMsg->msg == CURLMSG_DONE )
CurlConnectionIsDone ( pMsg );
}
}
CurlSocket_c* CurlMulti_c::MakeAsyncSocket ( curl_socket_t tCurlSocket ) const
{
MULTI_INFO << "called with sock " << tCurlSocket;
auto pSocket = new CurlSocket_c ( tCurlSocket, this );
Threads::ScopedScheduler_c tSerialFiber { CurlStrand() };
WriteSocketCookie ( tCurlSocket, reinterpret_cast<void*> ( pSocket ) );
return pSocket;
}
int CurlMulti_c::CmSocketCb ( curl_socket_t tCurlSocket, int iWhat, void* pSocketData ) const NO_THREAD_SAFETY_ANALYSIS
{
MULTI_INFO << "called with sock " << tCurlSocket << " for " << CurlPollName ( iWhat ) << "(" << iWhat << "), data " << pSocketData;
auto* pAsyncSocket = pSocketData ? (CurlSocket_c*)pSocketData : MakeAsyncSocket ( tCurlSocket );
switch ( iWhat )
{
case CURL_POLL_REMOVE:
case CURL_POLL_IN:
case CURL_POLL_OUT:
case CURL_POLL_INOUT:
pAsyncSocket->Engage ( iWhat );
default: break;
}
return 0;
}
class CurlHttpHeaders_c
{
struct curl_slist* m_pHttpHeadersList = nullptr;
public:
CurlHttpHeaders_c() = default;
~CurlHttpHeaders_c()
{
if ( m_pHttpHeadersList )
sph_curl_slist_free_all ( m_pHttpHeadersList );
}
void Append ( const char* szHeader )
{
m_pHttpHeadersList = sph_curl_slist_append ( m_pHttpHeadersList, szHeader );
}
inline curl_slist* CurlSlist() const { return m_pHttpHeadersList; }
};
bool IsCurlAvailable()
{
static bool bCurlLoaded = false;
if ( !bCurlLoaded )
bCurlLoaded = InitDynamicCurl();
return bCurlLoaded;
}
#if !defined (CURL_LIB)
#define CURL_LIB "internal"
#endif
static const char * szNoCurlMsg = CURL_LIB " not found";
using CurlOpt_t = std::pair<CURLoption, intptr_t>;
std::pair<bool, CSphString> InvokeCurl ( CSphString sUrl, const VecTraits_T<CurlOpt_t>& dParams )
{
if ( !IsCurlAvailable() )
return { false, szNoCurlMsg };
using namespace Threads::Coro;
auto pWorker = CurrentWorker();
std::unique_ptr<CurlConn_t> pRequest;
YieldWith ( [&]() mutable {
Go ( [&]() REQUIRES ( CurlStrand() ) mutable {
pRequest = std::make_unique<CurlConn_t> ( std::move ( sUrl ), pWorker );
dParams.for_each ( [&pRequest] ( const auto tParam ) { pRequest->SetCurlOpt ( tParam.first, tParam.second ); } );
pRequest->RunQuery();
},
CurlStrand() );
} );
if ( pRequest->m_uReturnCode != CURLE_OK )
return { false, pRequest->m_sError };
return { true, CSphString ( pRequest->m_dData ) };
}
CSphString FetchUrl ( const CSphString& sUrl )
{
CSphVector<CurlOpt_t> dCurlOpts;
dCurlOpts.Add ( { (CURLoption)237, 0L } ); // CURLOPT_PIPEWAIT; if this libcurl doesn't know the option, it is simply ignored
auto [bSuccess, sResult] = InvokeCurl ( sUrl, dCurlOpts );
return sResult;
}
std::pair<bool, CSphString> PostToHelperUrl ( CSphString sUrl, Str_t sQuery, const VecTraits_T<CSphString>& dHeaders )
{
CSphVector<CurlOpt_t> dOptions;
dOptions.Add ( { CURLOPT_POST, 1 } );
dOptions.Add ( { CURLOPT_POSTFIELDSIZE, sQuery.second } );
dOptions.Add ( { CURLOPT_POSTFIELDS, (intptr_t)sQuery.first } );
dOptions.Add ( { (CURLoption)237, 0L } ); // CURLOPT_PIPEWAIT; if this libcurl doesn't know the option, it is simply ignored
CurlHttpHeaders_c tHeaders;
dHeaders.for_each ( [&tHeaders] ( const auto& sHeader ) { tHeaders.Append ( sHeader.cstr() ); } );
tHeaders.Append ( "Content-Type: application/json; charset=UTF-8" );
dOptions.Add ( { CURLOPT_HTTPHEADER, (intptr_t)tHeaders.CurlSlist() } );
return InvokeCurl ( std::move ( sUrl ), dOptions );
}
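// A minimal usage sketch; both helpers suspend the calling coroutine until the
// transfer completes, so they must run on a coro worker (the URLs and header
// below are illustrative placeholders):
#if 0
CSphString sBody = FetchUrl ( "http://localhost:9200/_cat/health" );
CSphVector<CSphString> dHeaders;
dHeaders.Add ( "X-Example-Header: 1" );
auto [bOk, sAnswer] = PostToHelperUrl ( "http://localhost:9200/_bulk", FROMS ( "{}" ), dHeaders );
#endif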
void ShutdownCurl()
{
if ( !CurlMulti_c::IsInitialized() )
return;
Threads::CallPlainCoroutine ( []() REQUIRES ( CurlStrand() ) {
if ( !CurlMulti_c::IsInitialized() )
return;
while ( !g_tCurlConnections.empty() )
{
auto& tConn = g_tCurlConnections.front();
strcpy ( tConn.m_sError, "Interrupted due to shutdown" );
tConn.Done ( CURLE_ABORTED_BY_CALLBACK );
}
CurlMulti().Deinit();
},
CurlStrand() );
}
#else // WITH_CURL
static const char* szNoCurlMsg = "No CURL support compiled in";
CSphString FetchUrl ( const CSphString& sUrl )
{
return szNoCurlMsg;
}
std::pair<bool, CSphString> PostToHelperUrl ( CSphString sUrl, Str_t sQuery, const VecTraits_T<CSphString>& dHeaders )
{
return { false, szNoCurlMsg };
}
void ShutdownCurl()
{}
bool IsCurlAvailable()
{
return false;
}
#endif
size: 22,177 | language: C++ | extension: .cpp | total_lines: 628 | avg_line_length: 33.079618 | max_line_length: 201 | alphanum_fraction: 0.713592 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact & near; redpajama, githubcode, stackv1, stackv2): all false

id: 16,828 | file_name: coro_stack.cpp | file_path: manticoresoftware_manticoresearch/src/coro_stack.cpp
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "coro_stack.h"
#include "std/env.h"
// works around a bug in protected_fixedsize on Win: this header is missing from its includes
#include <boost/assert.hpp>
#include <boost/context/protected_fixedsize_stack.hpp>
#include <boost/context/fixedsize_stack.hpp>
namespace Threads {
static StackFlavour_E g_eStackFlavour = val_from_env ( "MANTICORE_GUARDED_STACK", false ) ? StackFlavour_E::protected_fixedsize : StackFlavour_E::fixedsize;
inline size_t AlignStackSize ( size_t iSize )
{
return ( iSize + STACK_ALIGN - 1 ) & ~( STACK_ALIGN - 1 );
}
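// The mask trick rounds iSize up to the next multiple of STACK_ALIGN, which
// must be a power of two. A worked check, assuming STACK_ALIGN is 16:
#if 0
static_assert ( ( ( 100 + 16 - 1 ) & ~( 16 - 1 ) ) == 112, "100 rounds up to 112" );
static_assert ( ( ( 128 + 16 - 1 ) & ~( 16 - 1 ) ) == 128, "multiples stay put" );
#endif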
CoroStack_t AllocateStack ( size_t iStack )
{
switch ( g_eStackFlavour )
{
case StackFlavour_E::fixedsize:
{
boost::context::fixedsize_stack allocator { iStack ? AlignStackSize ( iStack ) : DEFAULT_CORO_STACK_SIZE };
return { allocator.allocate(), StackFlavour_E::fixedsize };
}
case StackFlavour_E::protected_fixedsize:
{
boost::context::protected_fixedsize_stack allocator { iStack ? AlignStackSize ( iStack ) : DEFAULT_CORO_STACK_SIZE };
auto tStack = allocator.allocate(); tStack.size -= boost::context::protected_fixedsize_stack::traits_type::page_size(); // align guard page
return { tStack, StackFlavour_E::protected_fixedsize };
}
default:
assert(false && "should not be here");
return { boost::context::stack_context{}, StackFlavour_E::fixedsize }; // unreachable, but keeps this non-void function well-defined
}
}
CoroStack_t MockedStack ( VecTraits_T<BYTE> dStack )
{
boost::context::stack_context tStack;
tStack.sp = &dStack.Last();
tStack.size = dStack.GetLength();
#if defined( BOOST_USE_VALGRIND )
tStack.valgrind_stack_id = VALGRIND_STACK_REGISTER ( dStack.begin(), tStack.sp );
#endif
return { tStack, StackFlavour_E::mocked_prealloc };
}
void DeallocateStack ( CoroStack_t tStack )
{
switch ( tStack.second )
{
case StackFlavour_E::fixedsize:
{
boost::context::fixedsize_stack allocator { 0 };
allocator.deallocate ( tStack.first );
break;
}
case StackFlavour_E::protected_fixedsize:
{
boost::context::protected_fixedsize_stack allocator { 0 };
tStack.first.size += boost::context::protected_fixedsize_stack::traits_type::page_size(); // undo guard page align
allocator.deallocate ( tStack.first );
break;
}
case StackFlavour_E::mocked_prealloc:
{
#if defined( BOOST_USE_VALGRIND )
VALGRIND_STACK_DEREGISTER ( tStack.first.valgrind_stack_id );
#endif
}
}
}
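// A minimal round-trip sketch: deallocation dispatches on the flavour recorded
// at allocation time, so the pair must travel together:
#if 0
CoroStack_t tStack = AllocateStack ( 0 ); // 0 -> DEFAULT_CORO_STACK_SIZE
// ... run a coroutine on tStack.first ...
DeallocateStack ( tStack ); // frees according to tStack.second
#endif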
} // namespace Threads
size: 2,678 | language: C++ | extension: .cpp | total_lines: 76 | avg_line_length: 33 | max_line_length: 156 | alphanum_fraction: 0.737452 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact & near; redpajama, githubcode, stackv1, stackv2): all false

id: 16,829 | file_name: task_info.cpp | file_path: manticoresoftware_manticoresearch/src/task_info.cpp
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "task_info.h"
#include "threadutils.h"
namespace { // static
const size_t NINFOS = 256;
RenderFnPtr pInfos[NINFOS] = { nullptr };
std::atomic<int> dCounters[NINFOS];
std::atomic<BYTE> uFreeInfoSlot {1}; // 0-th slot is a mark of 'invalid'
}
BYTE RegisterRenderer ( RenderFnPtr pFunc ) noexcept
{
BYTE uRender = uFreeInfoSlot.fetch_add ( 1, std::memory_order_relaxed );
pInfos[uRender] = pFunc;
dCounters[uRender].store ( 0 );
return uRender;
}
void RefCount_t::Inc ( BYTE eType )
{
if ( eType >= uFreeInfoSlot )
sphWarning ( "Wrong RefCountInc slot! type=%d, free slot = %d", eType, uFreeInfoSlot.load() );
assert ( eType<uFreeInfoSlot );
if ( eType )
dCounters[eType].fetch_add ( 1, std::memory_order_relaxed );
}
void RefCount_t::Dec ( BYTE eType )
{
if ( eType>=uFreeInfoSlot )
sphWarning ( "Wrong RefCountDec slot! type=%d, free slot = %d", eType, uFreeInfoSlot.load () );
assert ( eType<uFreeInfoSlot );
if ( eType )
dCounters[eType].fetch_sub ( 1, std::memory_order_relaxed );
}
int myinfo::Count ( BYTE eType )
{
assert ( eType<uFreeInfoSlot );
return dCounters[eType].load ( std::memory_order_relaxed );
}
int myinfo::CountAll ()
{
int iRes = 0;
for ( int i = 1, iLast = uFreeInfoSlot.load ( std::memory_order_relaxed ); i<iLast; ++i )
iRes += dCounters[i].load ( std::memory_order_relaxed );
return iRes;
}
void PublicThreadDesc_t::Swap ( PublicThreadDesc_t & rhs )
{
::Swap ( m_iThreadID, rhs.m_iThreadID );
::Swap ( m_tmStart, rhs.m_tmStart );
::Swap ( m_tmLastJobStartTimeUS, rhs.m_tmLastJobStartTimeUS );
::Swap ( m_tmLastJobDoneTimeUS, rhs.m_tmLastJobDoneTimeUS );
::Swap ( m_tmTotalWorkedTimeUS, rhs.m_tmTotalWorkedTimeUS );
::Swap ( m_tmTotalWorkedCPUTimeUS, rhs.m_tmTotalWorkedCPUTimeUS );
::Swap ( m_iTotalJobsDone, rhs.m_iTotalJobsDone );
::Swap ( m_sThreadName, rhs.m_sThreadName );
::Swap ( m_sClientName, rhs.m_sClientName );
::Swap ( m_sDescription, rhs.m_sDescription );
::Swap ( m_sProto, rhs.m_sProto );
::Swap ( m_tmConnect, rhs.m_tmConnect );
::Swap ( m_pQuery, rhs.m_pQuery );
::Swap ( m_szCommand, rhs.m_szCommand );
::Swap ( m_iConnID, rhs.m_iConnID );
::Swap ( m_eProto, rhs.m_eProto );
::Swap ( m_eTaskState, rhs.m_eTaskState );
::Swap ( m_sChain, rhs.m_sChain );
}
void CopyBasicThreadInfo ( const Threads::LowThreadDesc_t * pSrc, PublicThreadDesc_t & dDst )
{
dDst.m_iThreadID = pSrc->m_iThreadID;
if ( !dDst.m_tmStart )
dDst.m_tmStart.emplace ( pSrc->m_tmStart );
dDst.m_tmLastJobStartTimeUS = pSrc->m_tmLastJobStartTimeUS;
dDst.m_tmLastJobDoneTimeUS = pSrc->m_tmLastJobDoneTimeUS;
dDst.m_tmTotalWorkedTimeUS = pSrc->m_tmTotalWorkedTimeUS;
dDst.m_tmTotalWorkedCPUTimeUS = pSrc->m_tmTotalWorkedCPUTimeUS;
dDst.m_iTotalJobsDone = pSrc->m_iTotalJobsDone;
dDst.m_sThreadName = pSrc->m_sThreadName;
}
void RenderPublicTaskInfo ( const void * pSrc, PublicThreadDesc_t & dDst, BYTE eType )
{
if ( pInfos[eType] )
pInfos[eType] ( pSrc, dDst );
}
void GatherPublicTaskInfo ( PublicThreadDesc_t& dDst, const std::atomic<void*>& pTask )
{
hazard::Guard_c tGuard;
auto pSrcInfo = (TaskInfo_t*)tGuard.Protect ( pTask );
while ( pSrcInfo )
{
RenderPublicTaskInfo ( pSrcInfo, dDst, pSrcInfo->m_eType );
pSrcInfo = (TaskInfo_t*)tGuard.Protect ( pSrcInfo->m_pPrev );
}
tGuard.Release();
}
PublicThreadDesc_t GatherPublicThreadInfo ( const Threads::LowThreadDesc_t * pSrc, int iCols )
{
PublicThreadDesc_t dDst;
if (!pSrc)
return dDst;
dDst.m_iDescriptionLimit = iCols; // works as call-back
GatherPublicTaskInfo ( dDst, pSrc->m_pTaskInfo );
CopyBasicThreadInfo ( pSrc, dDst );
return dDst;
}
TaskInfo_t* myinfo::HazardTaskInfo()
{
return (TaskInfo_t*)Threads::MyThd().m_pTaskInfo.load ( std::memory_order_acquire );
}
TaskInfo_t* myinfo::GetHazardTypedNode ( BYTE eType )
{
return HazardGetNode ( [eType] ( TaskInfo_t* pNode ) { return pNode->m_eType == eType; } );
}
// bind current taskinfo content to handler
Threads::Handler myinfo::StickParent ( Threads::Handler fnHandler )
{
auto pParent = myinfo::HazardTaskInfo();
return [pParent, fnHandler = std::move ( fnHandler )] {
Threads::MyThd().m_pTaskInfo.store ( pParent, std::memory_order_release );
fnHandler();
};
}
// bind current taskinfo and add new scoped mini info for coro handler
Threads::Handler myinfo::OwnMini ( Threads::Handler fnHandler )
{
auto pParent = myinfo::HazardTaskInfo();
return [pParent, fnHandler = std::move ( fnHandler )] {
Threads::MyThd().m_pTaskInfo.store ( pParent, std::memory_order_release );
ScopedMiniInfo_t _ ( new MiniTaskInfo_t );
fnHandler();
};
}
Threads::Handler myinfo::OwnMiniNoCount ( Threads::Handler fnHandler )
{
auto pParent = myinfo::HazardTaskInfo();
return [pParent, fnHandler = std::move ( fnHandler )] {
Threads::MyThd().m_pTaskInfo.store ( pParent, std::memory_order_release );
ScopedMiniInfoNoCount_t _ ( new MiniTaskInfo_t );
fnHandler();
};
}
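// A minimal sketch of wrapping a job so it inherits the current task-info
// chain and publishes its own scoped mini info (the command name and step
// counts are illustrative placeholders):
#if 0
Threads::Handler fnJob = myinfo::OwnMini ( [] {
myinfo::SetCommand ( "EXAMPLE" );
myinfo::SetTaskInfo ( "step %d of %d", 1, 3 );
} );
fnJob(); // typically scheduled on a worker rather than invoked inline
#endif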
// generic is empty
DEFINE_RENDER ( TaskInfo_t ) {};
void MiniTaskInfo_t::RenderWithoutChain ( PublicThreadDesc_t& dDst )
{
if ( !dDst.m_tmStart )
dDst.m_tmStart.emplace ( m_tmStart );
dDst.m_tmLastJobStartTimeUS = m_tmLastJobStartTimeUS;
dDst.m_tmLastJobDoneTimeUS = m_tmLastJobDoneTimeUS;
dDst.m_szCommand = m_szCommand;
hazard::Guard_c tGuard;
auto pDescription = tGuard.Protect ( m_pHazardDescription );
if ( pDescription )
{
if ( dDst.m_iDescriptionLimit < 0 ) // no limit
dDst.m_sDescription << *pDescription;
else
dDst.m_sDescription.AppendChunk ( { pDescription->scstr(), Min ( m_iDescriptionLen, dDst.m_iDescriptionLimit ) } );
}
}
DEFINE_RENDER ( MiniTaskInfo_t )
{
dDst.m_sChain << "Mini ";
auto& tInfo = *(MiniTaskInfo_t*)pSrc;
tInfo.RenderWithoutChain ( dDst );
}
void SetMiniDescription ( MiniTaskInfo_t * pNode, CSphString * pString, int iLen )
{
assert ( pNode );
assert ( pString );
if ( pNode->m_iDescriptionLen>myinfo::HazardDescriptionSizeLimit )
pNode->m_pHazardDescription.RetireNow ( pString );
else
pNode->m_pHazardDescription = pString;
pNode->m_iDescriptionLen = iLen;
pNode->m_tmStart = sphMicroTimer();
}
void SetMiniDescription ( MiniTaskInfo_t * pNode, const char * sTemplate, ... )
{
assert ( pNode );
StringBuilder_c sBuf;
va_list ap;
va_start ( ap, sTemplate );
sBuf.vSprintf ( sTemplate, ap );
va_end ( ap );
auto pString = new CSphString;
auto iLen = sBuf.GetLength();
sBuf.MoveTo ( *pString );
SetMiniDescription ( pNode, pString, iLen );
}
void myinfo::SetCommand ( const char * szCommand )
{
auto pNode = HazardGetMini ();
if ( pNode )
{
pNode->m_szCommand = szCommand;
pNode->m_tmLastJobStartTimeUS = sphMicroTimer();
pNode->m_tmLastJobDoneTimeUS = -1;
}
else
sphWarning ( "internal error: myinfo::SetCommand () invoked with empty tls!" );
}
void myinfo::SetCommandDone()
{
auto pNode = HazardGetMini();
if ( pNode )
{
pNode->m_tmLastJobDoneTimeUS = sphMicroTimer();
} else
sphWarning ( "internal error: myinfo::SetCommand () invoked with empty tls!" );
}
Str_t myinfo::UnsafeDescription ()
{
auto pNode = HazardGetMini ();
assert (pNode);
if ( pNode )
{
if ( pNode->m_pHazardDescription )
return { pNode->m_pHazardDescription->cstr (), pNode->m_iDescriptionLen };
else
return dEmptyStr;
}
sphWarning ( "internal error: myinfo::Description () invoked with empty tls!" );
return dEmptyStr;
}
void myinfo::SetDescription ( CSphString sString, int iLen )
{
auto pNode = HazardGetMini ();
assert ( pNode );
if ( !pNode )
{
sphWarning ( "internal error: myinfo::SetDescription () invoked with empty tls!" );
return;
}
SetMiniDescription ( pNode, new CSphString ( std::move ( sString ) ), iLen );
}
void myinfo::SetTaskInfo ( const char * sTemplate, ... )
{
auto pNode = HazardGetMini ();
assert ( pNode );
if ( !pNode )
{
sphWarning ( "internal error: myinfo::SetTaskInfo () invoked with empty tls!" );
return;
}
StringBuilder_c sBuf;
va_list ap;
va_start ( ap, sTemplate );
sBuf.vSprintf ( sTemplate, ap );
va_end ( ap );
auto pString = new CSphString;
auto iLen = sBuf.GetLength();
sBuf.MoveTo ( *pString );
SetMiniDescription ( pNode, pString, iLen );
}
MiniTaskInfo_t * MakeSystemInfo ( const char * sDescription )
{
auto pInfo = new MiniTaskInfo_t;
pInfo->m_szCommand = "SYSTEM";
pInfo->m_tmLastJobStartTimeUS = sphMicroTimer();
SetMiniDescription( pInfo, "SYSTEM %s", sDescription );
return pInfo;
}
ScopedMiniInfo_t PublishSystemInfo ( const char * sDescription )
{
return ScopedMiniInfo_t ( MakeSystemInfo ( sDescription ) );
}
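// A minimal usage sketch: the returned scoped holder keeps the "SYSTEM ..."
// entry published for the lifetime of the block (the description is an
// illustrative placeholder):
#if 0
{
auto tGuard = PublishSystemInfo ( "optimizing chunks" );
// ... long-running maintenance work ...
} // unpublished here
#endif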
size: 8,901 | language: C++ | extension: .cpp | total_lines: 273 | avg_line_length: 30.619048 | max_line_length: 118 | alphanum_fraction: 0.724126 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact & near; redpajama, githubcode, stackv1, stackv2): all false

id: 16,830 | file_name: client_task_info.cpp | file_path: manticoresoftware_manticoresearch/src/client_task_info.cpp
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "client_task_info.h"
#include "client_session.h"
std::atomic<int> ClientTaskInfo_t::m_iClients { 0 };
std::atomic<int> ClientTaskInfo_t::m_iVips { 0 };
std::atomic<int> ClientTaskInfo_t::m_iBuddy { 0 };
DEFINE_RENDER ( ClientTaskInfo_t )
{
auto& tInfo = *(ClientTaskInfo_t*)pSrc;
((MiniTaskInfo_t&)tInfo).RenderWithoutChain ( dDst );
dDst.m_sClientName << tInfo.m_sClientName;
if ( tInfo.m_bVip )
dDst.m_sClientName << "vip";
dDst.m_iConnID = tInfo.m_iConnID;
dDst.m_eTaskState = tInfo.m_eTaskState;
dDst.m_eProto = tInfo.m_eProto;
dDst.m_bKilled = tInfo.m_bKilled;
dDst.m_sProto << ProtoName ( tInfo.m_eProto );
dDst.m_sChain << "Conn ";
if ( tInfo.m_bSsl )
dDst.m_sProto << "ssl";
}
MiniTaskInfo_t* myinfo::HazardGetMini()
{
return (MiniTaskInfo_t*)myinfo::HazardGetNode ( [] ( TaskInfo_t* pNode ) {
return pNode->m_eType == MiniTaskInfo_t::Task() || pNode->m_eType == ClientTaskInfo_t::Task();
} );
}
ClientTaskInfo_t * HazardGetClient ()
{
return (ClientTaskInfo_t *) myinfo::GetHazardTypedNode ( ClientTaskInfo_t::Task() );
}
ClientTaskInfo_t & ClientTaskInfo_t::Info ( bool bStrict ) noexcept
{
auto * pInfo = HazardGetClient();
if ( !pInfo )
{
static ClientTaskInfo_t tStub;
pInfo = &tStub;
if ( bStrict )
sphWarning ( "internal error: session::Info () invoked with empty tls!" );
}
return *pInfo;
}
void ClientTaskInfo_t::SetTaskState ( TaskState_e eState )
{
m_eTaskState = eState;
m_tmStart = sphMicroTimer();
}
void ClientTaskInfo_t::SetClientSession ( ClientSession_c* pSession )
{
m_pSession = pSession;
}
ClientSession_c* ClientTaskInfo_t::GetClientSession()
{
return m_pSession;
}
void ClientTaskInfo_t::SetBuddy ( bool bBuddy )
{
// FIXME!!! remove this inconsistency: the counter is increased here but decreased at ~ScopedClientInfo_c
if ( bBuddy!=m_bBuddy )
{
m_bBuddy = bBuddy;
if ( bBuddy )
m_iBuddy.fetch_add ( 1, std::memory_order_relaxed );
else
m_iBuddy.fetch_sub ( 1, std::memory_order_relaxed );
}
}
namespace {
volatile int g_iDistThreads = 0;
}
volatile int &getDistThreads ()
{
return g_iDistThreads;
}
int GetEffectiveDistThreads ()
{
auto iSessionVal = ClientTaskInfo_t::Info().m_iDistThreads;
return iSessionVal ? iSessionVal : getDistThreads ();
}
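// A tiny sketch of the override semantics: a non-zero per-session value wins,
// zero falls back to the daemon-wide default:
#if 0
getDistThreads() = 4; // daemon default
ClientTaskInfo_t::Info().m_iDistThreads = 0; // session says "use default"
int iThreads = GetEffectiveDistThreads(); // -> 4
#endif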
Dispatcher::Template_t GetEffectiveBaseDispatcherTemplate()
{
auto tDispatcher = Dispatcher::GetGlobalBaseDispatcherTemplate();
Dispatcher::Unify ( tDispatcher, ClientTaskInfo_t::Info().GetBaseDispatcherTemplate() );
return tDispatcher;
}
Dispatcher::Template_t GetEffectivePseudoShardingDispatcherTemplate()
{
auto tDispatcher = Dispatcher::GetGlobalPseudoShardingDispatcherTemplate();
Dispatcher::Unify ( tDispatcher, ClientTaskInfo_t::Info().GetPseudoShardingDispatcherTemplate() );
return tDispatcher;
}
size: 3,148 | language: C++ | extension: .cpp | total_lines: 101 | avg_line_length: 29.346535 | max_line_length: 99 | alphanum_fraction: 0.745376 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact & near; redpajama, githubcode, stackv1, stackv2): all false

id: 16,831 | file_name: skip_cache.cpp | file_path: manticoresoftware_manticoresearch/src/skip_cache.cpp
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "skip_cache.h"
#include "std/ints.h"
#include "std/crc32.h"
#include "std/lrucache.h"
#include "sphinxsearch.h"
bool operator== ( const SkipCacheKey_t& lhs, const SkipCacheKey_t& rhs ) noexcept
{
return lhs.m_iIndexId == rhs.m_iIndexId && lhs.m_tWordId == rhs.m_tWordId;
}
struct SkipCacheUtil_t
{
static DWORD GetHash ( SkipCacheKey_t tKey )
{
DWORD uCRC32 = sphCRC32 ( &tKey.m_iIndexId, sizeof ( tKey.m_iIndexId ) );
return sphCRC32 ( &tKey.m_tWordId, sizeof ( tKey.m_tWordId ), uCRC32 );
}
static DWORD GetSize ( SkipData_t* pValue ) { return pValue ? pValue->m_dSkiplist.GetLengthBytes() : 0; }
static void Reset ( SkipData_t*& pValue ) { SafeDelete ( pValue ); }
};
class SkipCache_c: public LRUCache_T<SkipCacheKey_t, SkipData_t*, SkipCacheUtil_t>
{
using BASE = LRUCache_T<SkipCacheKey_t, SkipData_t*, SkipCacheUtil_t>;
using BASE::BASE;
public:
void DeleteAll ( int64_t iIndexId )
{
BASE::Delete ( [iIndexId] ( const SkipCacheKey_t& tKey ) { return tKey.m_iIndexId == iIndexId; } );
}
static void Init ( int64_t iCacheSize );
static void Done() { SafeDelete ( m_pSkipCache ); }
static SkipCache_c* Get() { return m_pSkipCache; }
private:
static SkipCache_c* m_pSkipCache;
};
SkipCache_c* SkipCache_c::m_pSkipCache = nullptr;
void SkipCache_c::Init ( int64_t iCacheSize )
{
assert ( !m_pSkipCache );
if ( iCacheSize > 0 )
m_pSkipCache = new SkipCache_c ( iCacheSize );
}
void InitSkipCache ( int64_t iCacheSize )
{
SkipCache_c::Init ( iCacheSize );
}
void ShutdownSkipCache()
{
SkipCache_c::Done();
}
void SkipCache::DeleteAll ( int64_t iIndexId )
{
SkipCache_c* pSkipCache = SkipCache_c::Get();
if ( pSkipCache )
pSkipCache->DeleteAll ( iIndexId );
}
void SkipCache::Release ( SkipCacheKey_t tKey )
{
SkipCache_c* pSkipCache = SkipCache_c::Get();
if ( pSkipCache )
pSkipCache->Release ( std::move ( tKey ) );
}
bool SkipCache::Find ( SkipCacheKey_t tKey, SkipData_t * & pData )
{
SkipCache_c* pSkipCache = SkipCache_c::Get();
if ( pSkipCache )
return pSkipCache->Find ( std::move ( tKey ), pData );
return false;
}
bool SkipCache::Add ( SkipCacheKey_t tKey, SkipData_t* pData )
{
SkipCache_c* pSkipCache = SkipCache_c::Get();
if ( pSkipCache )
return pSkipCache->Add ( std::move ( tKey ), pData );
return false;
}
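// A minimal sketch of the intended call pattern: a hit from Find() (or a
// successful Add()) pins the entry, and Release() must balance that pin
// (assuming SkipCacheKey_t aggregate-initializes from its two key fields):
#if 0
SkipCacheKey_t tKey { iIndexId, tWordId };
SkipData_t * pSkips = nullptr;
if ( !SkipCache::Find ( tKey, pSkips ) )
{
pSkips = new SkipData_t; // ... fill the skiplist from disk ...
SkipCache::Add ( tKey, pSkips );
}
// ... use pSkips ...
SkipCache::Release ( tKey );
#endif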
size: 2,774 | language: C++ | extension: .cpp | total_lines: 86 | avg_line_length: 30.476744 | max_line_length: 106 | alphanum_fraction: 0.719805 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact & near; redpajama, githubcode, stackv1, stackv2): all false

id: 16,832 | file_name: indexcheck.cpp | file_path: manticoresoftware_manticoresearch/src/indexcheck.cpp
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "indexcheck.h"
#include "fileutils.h"
#include "attribute.h"
#include "indexformat.h"
#include "docidlookup.h"
#include "docstore.h"
#include "conversion.h"
#include "columnarlib.h"
#include "indexfiles.h"
#include "killlist.h"
constexpr int FAILS_THRESH = 100;
class DebugCheckError_c final : public DebugCheckError_i
{
public:
DebugCheckError_c ( FILE* pFile, const DocID_t* pExtract );
bool Fail ( const char* szFmt, ... ) final;
void Msg ( const char* szFmt, ... ) final;
void Progress ( const char* szFmt, ... ) final;
void Done() final;
int64_t GetNumFails() const final;
const DocID_t* GetExtractDocs() const final { return m_pExtract; };
private:
FILE* m_pFile { nullptr };
bool m_bProgress { false };
int64_t m_tStartTime { 0 };
int64_t m_nFails { 0 };
int64_t m_nFailsPrinted { 0 };
const DocID_t* m_pExtract;
};
DebugCheckError_c::DebugCheckError_c ( FILE * pFile, const DocID_t* pExtract )
: m_pFile ( pFile )
, m_pExtract { pExtract }
{
assert ( pFile );
m_bProgress = isatty ( fileno ( pFile ) )!=0;
m_tStartTime = sphMicroTimer();
}
void DebugCheckError_c::Msg ( const char * szFmt, ... )
{
assert ( m_pFile );
va_list ap;
va_start ( ap, szFmt );
vfprintf ( m_pFile, szFmt, ap );
fprintf ( m_pFile, "\n" );
va_end ( ap );
}
bool DebugCheckError_c::Fail ( const char * szFmt, ... )
{
assert ( m_pFile );
if ( ++m_nFails>=FAILS_THRESH )
return false;
va_list ap;
va_start ( ap, szFmt );
fprintf ( m_pFile, "FAILED, " );
vfprintf ( m_pFile, szFmt, ap );
fprintf ( m_pFile, "\n" );
va_end ( ap );
m_nFailsPrinted++;
if ( m_nFailsPrinted==FAILS_THRESH )
fprintf ( m_pFile, "(threshold reached; suppressing further output)\n" );
return false;
}
void DebugCheckError_c::Progress ( const char * szFmt, ... )
{
if ( !m_bProgress )
return;
assert ( m_pFile );
va_list ap;
va_start ( ap, szFmt );
vfprintf ( m_pFile, szFmt, ap );
fprintf ( m_pFile, "\r" );
va_end ( ap );
fflush ( m_pFile );
}
void DebugCheckError_c::Done()
{
assert ( m_pFile );
// well, no known kinds of failures, maybe some unknown ones
int64_t tmCheck = sphMicroTimer() - m_tStartTime;
if ( !m_nFails )
fprintf ( m_pFile, "check passed" );
else if ( m_nFails!=m_nFailsPrinted )
fprintf ( m_pFile, "check FAILED, " INT64_FMT " of " INT64_FMT " failures reported", m_nFailsPrinted, m_nFails );
else
fprintf ( m_pFile, "check FAILED, " INT64_FMT " failures reported", m_nFails );
fprintf ( m_pFile, ", %d.%d sec elapsed\n", (int)(tmCheck/1000000), (int)((tmCheck/100000)%10) );
}
int64_t DebugCheckError_c::GetNumFails() const
{
return m_nFails;
}
DebugCheckError_i* MakeDebugCheckError ( FILE* fp, DocID_t* pExtract )
{
return new DebugCheckError_c ( fp, pExtract );
}
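// A minimal usage sketch of the reporter lifecycle; progress lines only appear
// on a tty, and Done() prints the final summary with timing:
#if 0
std::unique_ptr<DebugCheckError_i> pReporter { MakeDebugCheckError ( stdout, nullptr ) };
pReporter->Msg ( "checking rows..." );
pReporter->Fail ( "rowid mismatch (expected=%d, got=%d)", 1, 2 ); // counted towards GetNumFails()
pReporter->Done(); // "check FAILED, 1 failures reported, ..."
#endif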
//////////////////////////////////////////////////////////////////////////
class FileDebugCheckReader_c final : public DebugCheckReader_i
{
public:
explicit FileDebugCheckReader_c ( CSphAutoreader * pReader )
: m_pReader ( pReader )
{}
int64_t GetLengthBytes() final
{
return ( m_pReader ? m_pReader->GetFilesize() : 0 );
}
bool GetBytes ( void * pData, int iSize ) final
{
if ( !m_pReader )
return false;
m_pReader->GetBytes ( pData, iSize );
return !m_pReader->GetErrorFlag();
}
bool SeekTo ( int64_t iOff, int iHint ) final
{
if ( !m_pReader )
return false;
m_pReader->SeekTo ( iOff, iHint );
return !m_pReader->GetErrorFlag();
}
private:
CSphAutoreader * m_pReader = nullptr;
};
void DebugCheckHelper_c::DebugCheck_Attributes ( DebugCheckReader_i & tAttrs, DebugCheckReader_i & tBlobs, int64_t nRows, int64_t iMinMaxBytes, const CSphSchema & tSchema, DebugCheckError_i & tReporter ) const
{
// empty?
if ( !tAttrs.GetLengthBytes() )
return;
tReporter.Msg ( "checking rows..." );
if ( !tSchema.GetAttrsCount() )
tReporter.Fail ( "no attributes in schema; schema should at least have '%s' attr", sphGetDocidName() );
if ( tSchema.GetAttr(0).m_sName!=sphGetDocidName() )
tReporter.Fail ( "first attribute in schema should be '%s'", tSchema.GetAttr(0).m_sName.cstr() );
if ( tSchema.GetAttr(0).m_eAttrType!=SPH_ATTR_BIGINT )
tReporter.Fail ( "%s attribute should be BIGINT", sphGetDocidName() );
const CSphColumnInfo * pBlobLocator = nullptr;
[[maybe_unused]] int nBlobAttrs = 0;
if ( tSchema.HasBlobAttrs() )
{
pBlobLocator = tSchema.GetAttr ( sphGetBlobLocatorName() );
if ( !pBlobLocator )
tReporter.Fail ( "schema has blob attrs, but no blob locator '%s'", sphGetBlobLocatorName() );
if ( tSchema.GetAttr(1).m_sName!=sphGetBlobLocatorName() )
tReporter.Fail ( "second attribute in schema should be '%s'", sphGetBlobLocatorName() );
if ( tSchema.GetAttr(1).m_eAttrType!=SPH_ATTR_BIGINT )
tReporter.Fail ( "%s attribute should be BIGINT", sphGetBlobLocatorName() );
if ( !tBlobs.GetLengthBytes() )
tReporter.Fail ( "schema has blob attrs, but blob file is empty" );
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
if ( sphIsBlobAttr ( tSchema.GetAttr(i) ) )
nBlobAttrs++;
} else
{
if ( tBlobs.GetLengthBytes() )
tReporter.Fail ( "schema has no blob attrs but has blob rows" );
}
// sizes and counts
DWORD uStride = tSchema.GetRowSize();
int64_t iAttrElemCount = ( tAttrs.GetLengthBytes() - iMinMaxBytes ) / sizeof(CSphRowitem);
int64_t iAttrExpected = nRows*uStride;
if ( iAttrExpected > iAttrElemCount )
tReporter.Fail ( "rowitems count mismatch (expected=" INT64_FMT ", loaded=" INT64_FMT ")", iAttrExpected, iAttrElemCount );
CSphVector<CSphAttrLocator> dFloatItems;
for ( int i=0; i<tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( tAttr.m_eAttrType==SPH_ATTR_FLOAT && !tAttr.IsColumnar() )
dFloatItems.Add ( tAttr.m_tLocator );
}
CSphFixedVector<CSphRowitem> dRow ( tSchema.GetRowSize() );
const CSphRowitem * pRow = dRow.Begin();
tAttrs.SeekTo ( 0, (int) dRow.GetLengthBytes() );
for ( int64_t iRow=0; iRow<nRows; iRow++ )
{
tAttrs.GetBytes ( dRow.Begin(), (int) dRow.GetLengthBytes() );
DocID_t tDocID = sphGetDocID(pRow);
///////////////////////////
// check blobs
///////////////////////////
if ( pBlobLocator )
{
int64_t iBlobOffset = sphGetRowAttr ( pRow, pBlobLocator->m_tLocator );
CSphString sError;
if ( !sphCheckBlobRow ( iBlobOffset, tBlobs, tSchema, sError ) )
tReporter.Fail ( "%s at offset " INT64_FMT ", docid=" INT64_FMT ", rowid=" INT64_FMT " of " INT64_FMT, sError.cstr(), iBlobOffset, tDocID, iRow, nRows );
}
///////////////////////////
// check floats
///////////////////////////
ARRAY_FOREACH ( iItem, dFloatItems )
{
const DWORD uValue = (DWORD)sphGetRowAttr ( pRow, dFloatItems[ iItem ] );
const DWORD uExp = ( uValue >> 23 ) & 0xff;
const DWORD uMantissa = uValue & 0x007fffff; // low 23 bits, the full IEEE-754 single-precision mantissa
// check normalized
if ( uExp==0 && uMantissa!=0 )
tReporter.Fail ( "float attribute value is unnormalized (row=" INT64_FMT ", attr=%d, id=" INT64_FMT ", raw=0x%x, value=%f)", iRow, iItem, tDocID, uValue, sphDW2F ( uValue ) );
// check +-inf
if ( uExp==0xff && uMantissa==0 )
tReporter.Fail ( "float attribute is infinity (row=" INT64_FMT ", attr=%d, id=" INT64_FMT ", raw=0x%x, value=%f)", iRow, iItem, tDocID, uValue, sphDW2F ( uValue ) );
}
}
}
void DebugCheckHelper_c::DebugCheck_DeadRowMap ( int64_t iSizeBytes, int64_t nRows, DebugCheckError_i & tReporter ) const
{
tReporter.Msg ( "checking dead row map..." );
int64_t nExpectedEntries = ( nRows+31 ) / 32; // keep the math in 64 bits to avoid truncation on huge segments
int64_t iExpectedSize = nExpectedEntries*sizeof(DWORD);
if ( iSizeBytes!=iExpectedSize )
tReporter.Fail ( "unexpected dead row map: " INT64_FMT ", expected: " INT64_FMT " bytes", iSizeBytes, iExpectedSize );
}
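// Worked example of the sizing above: 100 rows need ceil(100/32) == 4 bitmap
// DWORDs, i.e. 16 bytes:
#if 0
static_assert ( ( ( 100 + 31 ) / 32 ) * sizeof(DWORD) == 16, "dead row map sizing" );
#endif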
//////////////////////////////////////////////////////////////////////////
class CheckError_c
{
CSphString m_sWhat;
public:
explicit CheckError_c ( const char* szWhat )
: m_sWhat { SphSprintf ("%s", szWhat ) }
{}
CheckError_c ( const char* szWhat, const CSphString& sError )
: m_sWhat { SphSprintf ( "%s: %s", szWhat, sError.scstr() ) }
{}
CheckError_c ( const char* szTemplate, ... ) __attribute__ ( ( format ( printf, 2, 3 ) ) )
{
va_list ap;
va_start ( ap, szTemplate );
m_sWhat.SetSprintfVa ( szTemplate, ap );
va_end ( ap );
}
const char* sWhat() const noexcept { return m_sWhat.scstr(); }
};
//////////////////////////////////////////////////////////////////////////
struct Wordid_t
{
bool m_bWordDict;
union {
SphWordID_t m_uWordid;
const char* m_szWordid;
};
};
static StringBuilder_c& operator<< ( StringBuilder_c& dOut, const Wordid_t& tWordID )
{
ScopedComma_c _ (dOut, dEmptyBl);
if ( tWordID.m_bWordDict )
return dOut << tWordID.m_szWordid;
return dOut << "(hash) " << tWordID.m_uWordid;
}
static JsonEscapedBuilder& operator<< ( JsonEscapedBuilder& dOut, const Wordid_t& tWordID )
{
if ( tWordID.m_bWordDict )
dOut.NamedString ( "token", tWordID.m_szWordid );
else
dOut.NamedVal ( "crc", tWordID.m_uWordid );
return dOut;
}
using cbWordidFn = std::function<void ( RowID_t, Wordid_t, int iField, int iPos, bool bIsEnd )>;
class DiskIndexChecker_c::Impl_c : public DebugCheckHelper_c
{
public:
Impl_c ( CSphIndex & tIndex, DebugCheckError_i & tReporter );
bool OpenFiles ();
void Setup ( int64_t iNumRows, int64_t iDocinfoIndex, int64_t iMinMaxIndex, bool bCheckIdDups );
CSphVector<SphWordID_t> & GetHitlessWords() { return m_dHitlessWords; }
void Check();
void ExtractDocs ();
private:
CSphIndex & m_tIndex;
CSphAutoreader m_tDictReader;
DataReaderFactoryPtr_c m_pDocsReader;
DataReaderFactoryPtr_c m_pHitsReader;
CSphAutoreader m_tSkipsReader;
CSphAutoreader m_tDeadRowReader;
CSphAutoreader m_tAttrReader;
CSphAutoreader m_tBlobReader;
CSphAutoreader m_tDocstoreReader;
CSphVector<SphWordID_t> m_dHitlessWords;
DebugCheckError_i & m_tReporter;
bool m_bHasBlobs = false;
bool m_bHasDocstore = false;
bool m_bIsEmpty = false;
DWORD m_uVersion = 0;
int64_t m_iNumRows = 0;
int64_t m_iDocinfoIndex = 0;
int64_t m_iMinMaxIndex = 0;
bool m_bCheckIdDups = false;
CSphSchema m_tSchema;
CWordlist m_tWordlist;
RowID_t GetRowidByDocid ( DocID_t iDocid ) const;
RowID_t CheckIfKilled ( RowID_t iRowID ) const;
void CheckDictionary();
void CheckDocs ( cbWordidFn&& cbfndoc = nullptr );
void CheckAttributes();
void CheckKillList() const;
void CheckBlockIndex();
void CheckColumnar();
void CheckDocidLookup();
void CheckDocids();
void CheckDocstore();
void CheckSchema();
bool ReadLegacyHeader ( CSphString& sError );
bool ReadHeader ( CSphString& sError );
CSphString GetFilename ( ESphExt eExt ) const;
};
DiskIndexChecker_c::Impl_c::Impl_c ( CSphIndex & tIndex, DebugCheckError_i & tReporter )
: m_tIndex ( tIndex )
, m_tReporter ( tReporter )
{}
bool DiskIndexChecker_c::Impl_c::ReadLegacyHeader ( CSphString& sError )
{
CSphAutoreader tHeaderReader;
if ( !tHeaderReader.Open ( GetFilename ( SPH_EXT_SPH ), sError ) )
return false;
const char * szHeader = tHeaderReader.GetFilename().cstr();
// magic header
const char * szFmt = CheckFmtMagic ( tHeaderReader.GetDword() );
if ( szFmt )
{
sError.SetSprintf ( szFmt, szHeader );
return false;
}
// version
m_uVersion = tHeaderReader.GetDword();
if ( m_uVersion<=1 || m_uVersion>INDEX_FORMAT_VERSION )
{
sError.SetSprintf ( "%s is v.%u, binary is v.%u", szHeader, m_uVersion, INDEX_FORMAT_VERSION );
return false;
}
// we don't support anything prior to v54
DWORD uMinFormatVer = 54;
if ( m_uVersion<uMinFormatVer )
{
sError.SetSprintf ( "tables prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, szHeader, m_uVersion );
return false;
}
// schema
ReadSchema ( tHeaderReader, m_tSchema, m_uVersion );
// dictionary header (wordlist checkpoints, infix blocks, etc)
m_tWordlist.m_iDictCheckpointsOffset = tHeaderReader.GetOffset();
m_tWordlist.m_iDictCheckpoints = tHeaderReader.GetDword();
m_tWordlist.m_iInfixCodepointBytes = tHeaderReader.GetByte();
m_tWordlist.m_iInfixBlocksOffset = tHeaderReader.GetDword();
m_tWordlist.m_iInfixBlocksWordsSize = tHeaderReader.GetDword();
m_tWordlist.m_dCheckpoints.Reset ( m_tWordlist.m_iDictCheckpoints );
return m_tWordlist.Preread ( GetFilename(SPH_EXT_SPI), m_tIndex.GetDictionary()->GetSettings().m_bWordDict, m_tIndex.GetSettings().m_iSkiplistBlockSize, sError );
// FIXME! add more header checks
}
bool DiskIndexChecker_c::Impl_c::ReadHeader ( CSphString& sError )
{
bool bHeaderIsJson;
{
BYTE dBuffer[8];
CSphAutoreader tHeaderReader ( dBuffer, sizeof ( dBuffer ) );
if ( !tHeaderReader.Open ( GetFilename ( SPH_EXT_SPH ), sError ) )
return false;
tHeaderReader.GetDword();
bHeaderIsJson = dBuffer[0] == '{';
}
if ( !bHeaderIsJson ) // that is old style binary header
return ReadLegacyHeader ( sError );
auto sHeader = GetFilename ( SPH_EXT_SPH );
const char* szHeader = sHeader.scstr();
using namespace bson;
CSphVector<BYTE> dData;
if ( !sphJsonParse ( dData, GetFilename ( SPH_EXT_SPH ), sError ) )
return false;
Bson_c tBson ( dData );
if ( tBson.IsEmpty() || !tBson.IsAssoc() )
{
sError = "Something wrong read from json header - it is either empty, either not root object.";
return false;
}
// version
m_uVersion = (DWORD)Int ( tBson.ChildByName ( "index_format_version" ) );
if ( m_uVersion <= 1 || m_uVersion > INDEX_FORMAT_VERSION )
{
sError.SetSprintf ( "%s is v.%u, binary is v.%u", szHeader, m_uVersion, INDEX_FORMAT_VERSION );
return false;
}
// we don't support anything prior to v64 with json format
DWORD uMinFormatVer = 64;
if ( m_uVersion < uMinFormatVer )
{
sError.SetSprintf ( "tables prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, szHeader, m_uVersion );
return false;
}
// schema
ReadSchemaJson ( tBson.ChildByName ( "schema" ), m_tSchema );
// dictionary header (wordlist checkpoints, infix blocks, etc)
m_tWordlist.m_iDictCheckpointsOffset = Int ( tBson.ChildByName ( "dict_checkpoints_offset" ) );
m_tWordlist.m_iDictCheckpoints = (int)Int ( tBson.ChildByName ( "dict_checkpoints" ) );
m_tWordlist.m_iInfixCodepointBytes = (int)Int ( tBson.ChildByName ( "infix_codepoint_bytes" ) );
m_tWordlist.m_iInfixBlocksOffset = Int ( tBson.ChildByName ( "infix_blocks_offset" ) );
m_tWordlist.m_iInfixBlocksWordsSize = (int)Int ( tBson.ChildByName ( "infix_block_words_size" ) );
m_tWordlist.m_dCheckpoints.Reset ( m_tWordlist.m_iDictCheckpoints );
return m_tWordlist.Preread ( GetFilename ( SPH_EXT_SPI ), m_tIndex.GetDictionary()->GetSettings().m_bWordDict, m_tIndex.GetSettings().m_iSkiplistBlockSize, sError );
// FIXME! add more header checks
}
bool DiskIndexChecker_c::Impl_c::OpenFiles ()
{
CSphString sError;
if ( !ReadHeader ( sError ) )
return m_tReporter.Fail ( "error reading table header: %s", sError.cstr() );
if ( !m_tDictReader.Open ( GetFilename ( SPH_EXT_SPI ), sError ) )
return m_tReporter.Fail ( "unable to open dictionary: %s", sError.cstr() );
// use file reader during debug check to lower memory pressure
m_pDocsReader = NewProxyReader ( GetFilename(SPH_EXT_SPD), sError, DataReaderFactory_c::DOCS, m_tIndex.GetMutableSettings().m_tFileAccess.m_iReadBufferDocList, FileAccess_e::FILE );
if ( !m_pDocsReader )
return m_tReporter.Fail ( "unable to open doclist: %s", sError.cstr() );
// use file reader during debug check to lower memory pressure
m_pHitsReader = NewProxyReader ( GetFilename(SPH_EXT_SPP), sError, DataReaderFactory_c::HITS, m_tIndex.GetMutableSettings().m_tFileAccess.m_iReadBufferHitList, FileAccess_e::FILE );
if ( !m_pHitsReader )
return m_tReporter.Fail ( "unable to open hitlist: %s", sError.cstr() );
if ( !m_tSkipsReader.Open ( GetFilename(SPH_EXT_SPE), sError ) )
return m_tReporter.Fail ( "unable to open skiplist: %s", sError.cstr () );
if ( !m_tDeadRowReader.Open ( GetFilename(SPH_EXT_SPM).cstr(), sError ) )
return m_tReporter.Fail ( "unable to open dead-row map: %s", sError.cstr() );
if ( m_tSchema.HasNonColumnarAttrs() && !m_tAttrReader.Open ( GetFilename(SPH_EXT_SPA).cstr(), sError ) )
return m_tReporter.Fail ( "unable to open attributes: %s", sError.cstr() );
if ( m_tSchema.GetAttr ( sphGetBlobLocatorName() ) )
{
if ( !m_tBlobReader.Open ( GetFilename(SPH_EXT_SPB), sError ) )
return m_tReporter.Fail ( "unable to open blobs: %s", sError.cstr() );
m_bHasBlobs = true;
}
if ( m_uVersion>=57 && ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() ) )
{
if ( !m_tDocstoreReader.Open ( GetFilename(SPH_EXT_SPDS).cstr(), sError ) )
return m_tReporter.Fail ( "unable to open docstore: %s", sError.cstr() );
m_bHasDocstore = true;
}
m_bIsEmpty = m_iNumRows==0;
return true;
}
void DiskIndexChecker_c::Impl_c::Setup ( int64_t iNumRows, int64_t iDocinfoIndex, int64_t iMinMaxIndex, bool bCheckIdDups )
{
m_iNumRows = iNumRows;
m_iDocinfoIndex = iDocinfoIndex;
m_iMinMaxIndex = iMinMaxIndex;
m_bCheckIdDups = bCheckIdDups;
}
struct WordVariantHit_t
{
CSphString m_sWord;
Wordid_t m_tWord;
bool m_bIsLast;
};
struct WordHit_t
{
CSphVector<WordVariantHit_t> m_dHits;
int m_iPos = -1;
void AddWord ( Wordid_t tWord, bool bIsLast )
{
auto& dHit = m_dHits.Add();
dHit.m_bIsLast = bIsLast;
dHit.m_tWord = tWord;
if ( tWord.m_bWordDict )
{
dHit.m_sWord = tWord.m_szWordid;
dHit.m_tWord.m_szWordid = dHit.m_sWord.cstr();
}
}
void Print ( StringBuilder_c& sOut, bool bLast )
{
if ( m_iPos < 0 )
{
sOut << "..";
return;
}
auto fnPrintHit = [&sOut,bLast] (const WordVariantHit_t& dHit) {
ScopedComma_c _ ( sOut, dEmptyBl );
sOut << dHit.m_tWord;
if ( !bLast && dHit.m_bIsLast )
sOut << "<EOF>";
if ( bLast && !dHit.m_bIsLast )
sOut << "...";
};
if ( m_dHits.GetLength()==1 )
fnPrintHit(m_dHits[0]);
else
{
ScopedComma_c sDivider ( sOut, StrBlock_t { FROMS ( "|" ), FROMS ( "[" ), FROMS ( "]" ) } );
for ( const auto& dHit : m_dHits )
fnPrintHit(dHit);
}
}
};
struct DocField_t {
int m_iField = -1;
CSphVector<WordHit_t> m_dHits;
};
inline static void FormatWordHit ( JsonEscapedBuilder& dOut, const Wordid_t& tWord, int iField, int iPos, bool bIsLast )
{
auto tObj = dOut.Object();
dOut << tWord;
dOut.NamedVal ( "field", iField );
dOut.NamedVal ( "pos", iPos );
dOut.NamedValNonDefault ( "is_last", bIsLast, false );
}
void DiskIndexChecker_c::Impl_c::ExtractDocs ()
{
assert ( m_tReporter.GetExtractDocs() );
auto uDocID = *m_tReporter.GetExtractDocs();
auto iRowID = GetRowidByDocid ( uDocID );
bool bIsKilled = ( INVALID_ROWID == CheckIfKilled ( iRowID ) );
CSphVector<DocField_t> dFields;
if ( iRowID!=INVALID_ROWID )
{
m_tReporter.Msg ( "\n# Restored document\n## Cloud of tokens\n\n```json\n[" );
CheckDocs ( [&dFields, iRowID, bIsNotFirst=false, this] ( RowID_t tRow, Wordid_t tWord, int iField, int iPos, bool bIsLast ) mutable {
if ( iRowID!=tRow )
return;
if ( dFields.GetLength() < iField + 1 )
dFields.Resize ( iField + 1 );
auto& dField = dFields[iField];
dField.m_iField = iField;
if ( dField.m_dHits.GetLength() < iPos )
dField.m_dHits.Resize ( iPos );
auto& dHit = dField.m_dHits[iPos - 1];
dHit.AddWord ( tWord, bIsLast );
dHit.m_iPos = iPos;
JsonEscapedBuilder sReport;
if ( std::exchange ( bIsNotFirst, true ) )
sReport << ',';
FormatWordHit ( sReport, tWord, iField, iPos, bIsLast );
m_tReporter.Msg ( "%s", sReport.cstr() );
});
m_tReporter.Msg ( "]\n```\n" );
}
StringBuilder_c sReport;
if ( iRowID == INVALID_ROWID )
sReport << "* Document " << uDocID << " is not found";
else {
sReport << "## Document " << uDocID << "\n* RowID " << iRowID << ( bIsKilled ? " (killed)\n" : "\n" );
ARRAY_FOREACH ( i, dFields )
{
const DocField_t& dField = dFields[i];
const CSphColumnInfo& tCol = m_tSchema.GetField ( i );
if ( dField.m_iField < 0 )
sReport << "\n### Field '" << tCol.m_sName << "' is empty.\n";
else {
sReport << "\n### Field '" << tCol.m_sName << "'\n";
ScopedComma_c tSpacer ( sReport, StrBlock_t { FROMS ( " " ), FROMS ( "" ), FROMS ( "\n" ) } );
ARRAY_FOREACH ( k, dField.m_dHits )
dField.m_dHits[k].Print ( sReport, k==dField.m_dHits.GetLength()-1 );
}
}
}
sReport << "\n--- <End of restored document> ---";
m_tReporter.Msg ( "%s", sReport.cstr() );
}
void DiskIndexChecker_c::Impl_c::Check()
{
if ( m_tReporter.GetExtractDocs() )
return ExtractDocs();
CheckSchema();
CheckDictionary();
CheckDocs();
CheckAttributes();
CheckBlockIndex();
CheckColumnar();
CheckKillList();
CheckDocstore();
DebugCheck_DeadRowMap ( m_tDeadRowReader.GetFilesize(), m_iNumRows, m_tReporter );
CheckDocidLookup();
if ( m_bCheckIdDups )
CheckDocids();
}
void DiskIndexChecker_c::Impl_c::CheckDictionary()
{
m_tReporter.Msg ( "checking dictionary..." );
const CSphIndexSettings & tIndexSettings = m_tIndex.GetSettings();
SphWordID_t uWordid = 0;
int64_t iDoclistOffset = 0;
int iWordsTotal = 0;
char sWord[MAX_KEYWORD_BYTES], sLastWord[MAX_KEYWORD_BYTES];
memset ( sWord, 0, sizeof(sWord) );
memset ( sLastWord, 0, sizeof(sLastWord) );
const int iWordPerCP = SPH_WORDLIST_CHECKPOINT;
const bool bWordDict = m_tIndex.GetDictionary()->GetSettings().m_bWordDict;
CSphVector<CSphWordlistCheckpoint> dCheckpoints;
dCheckpoints.Reserve ( m_tWordlist.m_iDictCheckpoints );
CSphVector<char> dCheckpointWords;
CSphAutoreader & tDictReader = m_tDictReader;
tDictReader.GetByte();
int iLastSkipsOffset = 0;
SphOffset_t iWordsEnd = m_tWordlist.GetWordsEnd();
while ( tDictReader.GetPos()!=iWordsEnd && !m_bIsEmpty )
{
// sanity checks
if ( tDictReader.GetPos()>=iWordsEnd )
{
m_tReporter.Fail ( "reading past checkpoints" );
break;
}
// store current entry pos (for checkpointing later), read next delta
const int64_t iDictPos = tDictReader.GetPos();
SphWordID_t iDeltaWord = 0;
if ( bWordDict )
iDeltaWord = tDictReader.GetByte();
else
iDeltaWord = tDictReader.UnzipWordid();
// checkpoint encountered, handle it
if ( !iDeltaWord )
{
tDictReader.UnzipOffset();
if ( ( iWordsTotal%iWordPerCP )!=0 && tDictReader.GetPos()!=iWordsEnd )
m_tReporter.Fail ( "unexpected checkpoint (pos=" INT64_FMT ", word=%d, words=%d, expected=%d)", iDictPos, iWordsTotal, ( iWordsTotal%iWordPerCP ), iWordPerCP );
uWordid = 0;
iDoclistOffset = 0;
continue;
}
SphWordID_t uNewWordid = 0;
SphOffset_t iNewDoclistOffset = 0;
int iDocs = 0;
int iHits = 0;
bool bHitless = false;
if ( bWordDict )
{
// unpack next word
// must be in sync with DictEnd()!
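			// Prefix-byte layout (as decoded below): if the high bit of uPack is set,
			// the byte packs a 3-bit (delta-1) and a 4-bit shared-prefix length
			// (e.g. 0xA3 -> delta=3, match=3); otherwise its low 7 bits are the delta
			// length, and the shared-prefix length follows as a separate byte.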
BYTE uPack = (BYTE)iDeltaWord;
int iMatch, iDelta;
if ( uPack & 0x80 )
{
iDelta = ( ( uPack>>4 ) & 7 ) + 1;
iMatch = uPack & 15;
} else
{
iDelta = uPack & 127;
iMatch = tDictReader.GetByte();
}
auto iLastWordLen = (const int) strlen(sLastWord);
if ( iMatch+iDelta>=(int)sizeof(sLastWord)-1 || iMatch>iLastWordLen )
{
m_tReporter.Fail ( "wrong word-delta (pos=" INT64_FMT ", word=%s, len=%d, begin=%d, delta=%d)", iDictPos, sLastWord, iLastWordLen, iMatch, iDelta );
tDictReader.SkipBytes ( iDelta );
} else
{
tDictReader.GetBytes ( sWord+iMatch, iDelta );
sWord [ iMatch+iDelta ] = '\0';
}
iNewDoclistOffset = tDictReader.UnzipOffset();
iDocs = tDictReader.UnzipInt();
iHits = tDictReader.UnzipInt();
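			// a one-byte hint is stored only for frequent words (docs>=DOCLIST_HINT_THRESH);
			// DoclistHintUnpack() expands it into an approximate doclist size, which is
			// only sanity-checked (non-negative) below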
int iHint = 0;
if ( iDocs>=DOCLIST_HINT_THRESH )
iHint = tDictReader.GetByte();
iHint = DoclistHintUnpack ( iDocs, (BYTE)iHint );
if ( m_tIndex.GetSettings().m_eHitless==SPH_HITLESS_SOME && ( iDocs & HITLESS_DOC_FLAG )!=0 )
{
iDocs = ( iDocs & HITLESS_DOC_MASK );
bHitless = true;
}
auto iNewWordLen = (const int) strlen(sWord);
if ( iNewWordLen==0 )
m_tReporter.Fail ( "empty word in dictionary (pos=" INT64_FMT ")", iDictPos );
if ( iLastWordLen && iNewWordLen )
				if ( sphDictCmpStrictly ( sWord, iNewWordLen, sLastWord, iLastWordLen )<=0 )
					m_tReporter.Fail ( "word order decreased (pos=" INT64_FMT ", word=%s, prev=%s)", iDictPos, sWord, sLastWord );
if ( iHint<0 )
m_tReporter.Fail ( "invalid word hint (pos=" INT64_FMT ", word=%s, hint=%d)", iDictPos, sWord, iHint );
if ( iDocs<=0 || iHits<=0 || iHits<iDocs )
m_tReporter.Fail ( "invalid docs/hits (pos=" INT64_FMT ", word=%s, docs=" INT64_FMT ", hits=" INT64_FMT ")", (int64_t)iDictPos, sWord, (int64_t)iDocs, (int64_t)iHits );
memcpy ( sLastWord, sWord, sizeof(sLastWord) );
} else
{
// finish reading the entire entry
uNewWordid = uWordid + iDeltaWord;
iNewDoclistOffset = iDoclistOffset + tDictReader.UnzipOffset();
iDocs = tDictReader.UnzipInt();
iHits = tDictReader.UnzipInt();
bHitless = ( m_dHitlessWords.BinarySearch ( uNewWordid )!=NULL );
if ( bHitless )
iDocs = ( iDocs & HITLESS_DOC_MASK );
if ( uNewWordid<=uWordid )
m_tReporter.Fail ( "wordid decreased (pos=" INT64_FMT ", wordid=" UINT64_FMT ", previd=" UINT64_FMT ")", (int64_t)iDictPos, (uint64_t)uNewWordid, (uint64_t)uWordid );
if ( iNewDoclistOffset<=iDoclistOffset )
m_tReporter.Fail ( "doclist offset decreased (pos=" INT64_FMT ", wordid=" UINT64_FMT ")", (int64_t)iDictPos, (uint64_t)uNewWordid );
if ( iDocs<=0 || iHits<=0 || iHits<iDocs )
m_tReporter.Fail ( "invalid docs/hits (pos=" INT64_FMT ", wordid=" UINT64_FMT ", docs=" INT64_FMT ", hits=" INT64_FMT ", hitless=%s)",
(int64_t)iDictPos, (uint64_t)uNewWordid, (int64_t)iDocs, (int64_t)iHits, ( bHitless?"true":"false" ) );
}
assert ( tIndexSettings.m_iSkiplistBlockSize>0 );
// skiplist
if ( iDocs>tIndexSettings.m_iSkiplistBlockSize && !bHitless )
{
int iSkipsOffset = tDictReader.UnzipInt();
if ( !bWordDict && iSkipsOffset<iLastSkipsOffset )
m_tReporter.Fail ( "descending skiplist pos (last=%d, cur=%d, wordid=" UINT64_FMT ")", iLastSkipsOffset, iSkipsOffset, UINT64 ( uNewWordid ) );
iLastSkipsOffset = iSkipsOffset;
}
// update stats, add checkpoint
if ( ( iWordsTotal%iWordPerCP )==0 )
{
CSphWordlistCheckpoint & tCP = dCheckpoints.Add();
tCP.m_iWordlistOffset = iDictPos;
if ( bWordDict )
{
auto iLen = (const int) strlen ( sWord );
char * sArenaWord = dCheckpointWords.AddN ( iLen + 1 );
memcpy ( sArenaWord, sWord, iLen );
sArenaWord[iLen] = '\0';
tCP.m_uWordID = sArenaWord - dCheckpointWords.Begin();
} else
tCP.m_uWordID = uNewWordid;
}
// TODO add back infix checking
uWordid = uNewWordid;
iDoclistOffset = iNewDoclistOffset;
iWordsTotal++;
}
// check the checkpoints
if ( dCheckpoints.GetLength()!=m_tWordlist.m_iDictCheckpoints )
m_tReporter.Fail ( "checkpoint count mismatch (read=%d, calc=%d)", m_tWordlist.m_iDictCheckpoints, dCheckpoints.GetLength() );
m_tWordlist.DebugPopulateCheckpoints();
for ( int i=0; i < Min ( dCheckpoints.GetLength(), m_tWordlist.m_iDictCheckpoints ); i++ )
{
CSphWordlistCheckpoint tRefCP = dCheckpoints[i];
const CSphWordlistCheckpoint & tCP = m_tWordlist.m_dCheckpoints[i];
const int iLen = bWordDict ? (int) strlen ( tCP.m_szWord ) : 0;
if ( bWordDict )
tRefCP.m_szWord = dCheckpointWords.Begin() + tRefCP.m_uWordID;
if ( bWordDict && ( tRefCP.m_szWord[0]=='\0' || tCP.m_szWord[0]=='\0' ) )
{
m_tReporter.Fail ( "empty checkpoint %d (read_word=%s, read_len=%u, readpos=" INT64_FMT ", calc_word=%s, calc_len=%u, calcpos=" INT64_FMT ")",
i, tCP.m_szWord, (DWORD)strlen ( tCP.m_szWord ), (int64_t)tCP.m_iWordlistOffset,
tRefCP.m_szWord, (DWORD)strlen ( tRefCP.m_szWord ), (int64_t)tRefCP.m_iWordlistOffset );
} else if ( sphCheckpointCmpStrictly ( tCP.m_szWord, iLen, tCP.m_uWordID, bWordDict, tRefCP ) || tRefCP.m_iWordlistOffset!=tCP.m_iWordlistOffset )
{
if ( bWordDict )
{
m_tReporter.Fail ( "checkpoint %d differs (read_word=%s, readpos=" INT64_FMT ", calc_word=%s, calcpos=" INT64_FMT ")",
i,
tCP.m_szWord,
(int64_t)tCP.m_iWordlistOffset,
tRefCP.m_szWord,
(int64_t)tRefCP.m_iWordlistOffset );
} else
{
m_tReporter.Fail ( "checkpoint %d differs (readid=" UINT64_FMT ", readpos=" INT64_FMT ", calcid=" UINT64_FMT ", calcpos=" INT64_FMT ")",
i,
(uint64_t)tCP.m_uWordID,
(int64_t)tCP.m_iWordlistOffset,
(uint64_t)tRefCP.m_uWordID,
(int64_t)tRefCP.m_iWordlistOffset );
}
}
}
dCheckpoints.Reset();
dCheckpointWords.Reset();
}
void DiskIndexChecker_c::Impl_c::CheckDocs( cbWordidFn&& fnCbWordid )
{
const CSphIndexSettings & tIndexSettings = m_tIndex.GetSettings();
if ( !fnCbWordid )
m_tReporter.Msg ( "checking data..." );
int64_t iDocsSize = m_pDocsReader->GetFilesize();
int64_t iSkiplistLen = m_tSkipsReader.GetFilesize();
m_tDictReader.SeekTo ( 1, READ_NO_SIZE_HINT );
m_pDocsReader->SeekTo ( 1 );
m_pHitsReader->SeekTo ( 1 );
SphWordID_t uWordid = 0;
int64_t iDoclistOffset = 0;
int iDictDocs, iDictHits;
bool bHitless = false;
const bool bWordDict = m_tIndex.GetDictionary()->GetSettings().m_bWordDict;
Wordid_t tCbWordid;
tCbWordid.m_bWordDict = bWordDict;
char sWord[MAX_KEYWORD_BYTES];
memset ( sWord, 0, sizeof(sWord) );
	int iWordsChecked = 0;
SphOffset_t iWordsEnd = m_tWordlist.GetWordsEnd();
while ( m_tDictReader.GetPos()<iWordsEnd )
{
bHitless = false;
SphWordID_t iDeltaWord = 0;
if ( bWordDict )
iDeltaWord = m_tDictReader.GetByte();
else
iDeltaWord = m_tDictReader.UnzipWordid();
if ( !iDeltaWord )
{
m_tDictReader.UnzipOffset();
uWordid = 0;
iDoclistOffset = 0;
continue;
}
if ( bWordDict )
{
// unpack next word
// must be in sync with DictEnd()!
BYTE uPack = (BYTE)iDeltaWord;
int iMatch, iDelta;
if ( uPack & 0x80 )
{
iDelta = ( ( uPack>>4 ) & 7 ) + 1;
iMatch = uPack & 15;
} else
{
iDelta = uPack & 127;
iMatch = m_tDictReader.GetByte();
}
auto iLastWordLen = (const int) strlen(sWord);
if ( iMatch+iDelta>=(int)sizeof(sWord)-1 || iMatch>iLastWordLen )
m_tDictReader.SkipBytes ( iDelta );
else
{
m_tDictReader.GetBytes ( sWord+iMatch, iDelta );
sWord [ iMatch+iDelta ] = '\0';
}
iDoclistOffset = m_tDictReader.UnzipOffset();
iDictDocs = m_tDictReader.UnzipInt();
iDictHits = m_tDictReader.UnzipInt();
if ( iDictDocs>=DOCLIST_HINT_THRESH )
m_tDictReader.GetByte();
if ( tIndexSettings.m_eHitless==SPH_HITLESS_SOME && ( iDictDocs & HITLESS_DOC_FLAG ) )
{
iDictDocs = ( iDictDocs & HITLESS_DOC_MASK );
bHitless = true;
}
tCbWordid.m_szWordid = sWord;
} else
{
// finish reading the entire entry
uWordid = uWordid + iDeltaWord;
bHitless = ( m_dHitlessWords.BinarySearch ( uWordid )!=NULL );
iDoclistOffset = iDoclistOffset + m_tDictReader.UnzipOffset();
iDictDocs = m_tDictReader.UnzipInt();
if ( bHitless )
iDictDocs = ( iDictDocs & HITLESS_DOC_MASK );
iDictHits = m_tDictReader.UnzipInt();
tCbWordid.m_uWordid = uWordid;
}
int64_t iSkipsOffset = 0;
if ( iDictDocs>tIndexSettings.m_iSkiplistBlockSize && !bHitless )
{
if ( m_uVersion<=57 )
iSkipsOffset = (int)m_tDictReader.UnzipInt();
else
iSkipsOffset = m_tDictReader.UnzipOffset();
}
// check whether the offset is as expected
if ( iDoclistOffset!=m_pDocsReader->GetPos() )
{
if ( !bWordDict )
m_tReporter.Fail ( "unexpected doclist offset (wordid=" UINT64_FMT "(%s)(%d), dictpos=" INT64_FMT ", doclistpos=" INT64_FMT ")",
(uint64_t)uWordid, sWord, iWordsChecked, iDoclistOffset, (int64_t) m_pDocsReader->GetPos() );
if ( iDoclistOffset>=iDocsSize || iDoclistOffset<0 )
{
m_tReporter.Fail ( "unexpected doclist offset, off the file (wordid=" UINT64_FMT "(%s)(%d), dictpos=" INT64_FMT ", doclistsize=" INT64_FMT ")",
(uint64_t)uWordid, sWord, iWordsChecked, iDoclistOffset, iDocsSize );
iWordsChecked++;
continue;
} else
m_pDocsReader->SeekTo ( iDoclistOffset );
}
// create and manually setup doclist reader
DiskIndexQwordTraits_c * pQword = sphCreateDiskIndexQword ( tIndexSettings.m_eHitFormat==SPH_HIT_FORMAT_INLINE );
pQword->m_tDoc.Reset ( m_tSchema.GetDynamicSize() );
pQword->m_tDoc.m_tRowID = INVALID_ROWID;
pQword->m_iDocs = 0;
pQword->m_iHits = 0;
pQword->SetDocReader ( m_pDocsReader );
// pQword->m_rdDoclist.SeekTo ( tDocsReader.GetPos(), READ_NO_SIZE_HINT );
pQword->SetHitReader ( m_pHitsReader );
// pQword->m_rdHitlist.SeekTo ( tHitsReader.GetPos(), READ_NO_SIZE_HINT );
// loop the doclist
int iDoclistDocs = 0;
int iDoclistHits = 0;
int iHitlistHits = 0;
bHitless |= ( tIndexSettings.m_eHitless==SPH_HITLESS_ALL ||
( tIndexSettings.m_eHitless==SPH_HITLESS_SOME && m_dHitlessWords.BinarySearch ( uWordid ) ) );
pQword->m_bHasHitlist = !bHitless;
CSphVector<SkiplistEntry_t> dDoclistSkips;
while (true)
{
// skiplist state is saved just *before* decoding those boundary entries
if ( ( iDoclistDocs & ( tIndexSettings.m_iSkiplistBlockSize-1 ) )==0 )
{
SkiplistEntry_t & tBlock = dDoclistSkips.Add();
tBlock.m_tBaseRowIDPlus1 = pQword->m_tDoc.m_tRowID+1;
tBlock.m_iOffset = pQword->m_rdDoclist->GetPos();
tBlock.m_iBaseHitlistPos = pQword->m_uHitPosition;
}
// FIXME? this can fail on a broken entry (eg fieldid over 256)
const CSphMatch & tDoc = pQword->GetNextDoc();
if ( tDoc.m_tRowID==INVALID_ROWID )
break;
// checks!
if ( tDoc.m_tRowID>m_iNumRows )
m_tReporter.Fail ( "rowid out of bounds (wordid=" UINT64_FMT "(%s), rowid=%u)", uint64_t(uWordid), sWord, tDoc.m_tRowID );
++iDoclistDocs;
iDoclistHits += pQword->m_uMatchHits;
// check position in case of regular (not-inline) hit
if (!( pQword->m_iHitlistPos>>63 ))
{
if ( !bWordDict && pQword->m_iHitlistPos!=pQword->m_rdHitlist->GetPos() )
m_tReporter.Fail ( "unexpected hitlist offset (wordid=" UINT64_FMT "(%s), rowid=%u, expected=" INT64_FMT ", actual=" INT64_FMT ")",
(uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, (int64_t)pQword->m_iHitlistPos, (int64_t)pQword->m_rdHitlist->GetPos() );
}
// aim
pQword->SeekHitlist ( pQword->m_iHitlistPos );
// loop the hitlist
int iDocHits = 0;
FieldMask_t dFieldMask;
dFieldMask.UnsetAll();
Hitpos_t uLastHit = EMPTY_HIT;
while ( !bHitless )
{
Hitpos_t uHit = pQword->GetNextHit();
if ( uHit==EMPTY_HIT )
break;
if ( !( uLastHit<uHit ) )
m_tReporter.Fail ( "hit entries sorting order decreased (wordid=" UINT64_FMT "(%s), rowid=%u, hit=%u, last=%u)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, uHit, uLastHit );
if ( HITMAN::GetField ( uLastHit )==HITMAN::GetField ( uHit ) )
{
if ( !( HITMAN::GetPos ( uLastHit )<HITMAN::GetPos ( uHit ) ) )
m_tReporter.Fail ( "hit decreased (wordid=" UINT64_FMT "(%s), rowid=%u, hit=%u, last=%u)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, HITMAN::GetPos ( uHit ), HITMAN::GetPos ( uLastHit ) );
if ( HITMAN::IsEnd ( uLastHit ) )
m_tReporter.Msg ( "WARNING, multiple tail hits (wordid=" UINT64_FMT "(%s), rowid=%u, hit=0x%x, last=0x%x)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, uHit, uLastHit );
} else
{
if ( !( HITMAN::GetField ( uLastHit )<HITMAN::GetField ( uHit ) ) )
m_tReporter.Fail ( "hit field decreased (wordid=" UINT64_FMT "(%s), rowid=%u, hit field=%u, last field=%u)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, HITMAN::GetField ( uHit ), HITMAN::GetField ( uLastHit ) );
}
if ( fnCbWordid )
fnCbWordid ( tDoc.m_tRowID, tCbWordid, HITMAN::GetField ( uHit ), HITMAN::GetPos ( uHit ), HITMAN::IsEnd ( uHit ) );
uLastHit = uHit;
int iField = HITMAN::GetField ( uHit );
if ( iField<0 || iField>=SPH_MAX_FIELDS )
m_tReporter.Fail ( "hit field out of bounds (wordid=" UINT64_FMT "(%s), rowid=%u, field=%d)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, iField );
else if ( iField>=m_tSchema.GetFieldsCount() )
m_tReporter.Fail ( "hit field out of schema (wordid=" UINT64_FMT "(%s), rowid=%u, field=%d)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, iField );
else
dFieldMask.Set(iField);
++iDocHits; // to check doclist entry
++iHitlistHits; // to check dictionary entry
}
// check hit count
if ( iDocHits!=(int)pQword->m_uMatchHits && !bHitless )
m_tReporter.Fail ( "doc hit count mismatch (wordid=" UINT64_FMT "(%s), rowid=%u, doclist=%d, hitlist=%d)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID, pQword->m_uMatchHits, iDocHits );
if ( m_tSchema.GetFieldsCount()>32 )
pQword->CollectHitMask();
// check the mask
if ( memcmp ( dFieldMask.m_dMask, pQword->m_dQwordFields.m_dMask, sizeof(dFieldMask.m_dMask) ) && !bHitless )
m_tReporter.Fail ( "field mask mismatch (wordid=" UINT64_FMT "(%s), rowid=%u)", (uint64_t)uWordid, sWord, pQword->m_tDoc.m_tRowID );
// update my hitlist reader
m_pHitsReader->SeekTo ( pQword->m_rdHitlist->GetPos() );
}
// do checks
if ( iDictDocs!=iDoclistDocs )
m_tReporter.Fail ( "doc count mismatch (wordid=" UINT64_FMT "(%s), dict=%d, doclist=%d, hitless=%s)", uint64_t(uWordid), sWord, iDictDocs, iDoclistDocs, ( bHitless?"true":"false" ) );
if ( ( iDictHits!=iDoclistHits || iDictHits!=iHitlistHits ) && !bHitless )
m_tReporter.Fail ( "hit count mismatch (wordid=" UINT64_FMT "(%s), dict=%d, doclist=%d, hitlist=%d)", uint64_t(uWordid), sWord, iDictHits, iDoclistHits, iHitlistHits );
while ( iDoclistDocs>tIndexSettings.m_iSkiplistBlockSize && !bHitless )
{
if ( iSkipsOffset<=0 || iSkipsOffset>iSkiplistLen )
{
m_tReporter.Fail ( "invalid skiplist offset (wordid=" UINT64_FMT "(%s), off=" INT64_FMT ", max=" INT64_FMT ")", UINT64 ( uWordid ), sWord, iSkipsOffset, iSkiplistLen );
break;
}
// boundary adjustment
if ( ( iDoclistDocs & ( tIndexSettings.m_iSkiplistBlockSize-1 ) )==0 )
dDoclistSkips.Pop();
SkiplistEntry_t t;
t.m_tBaseRowIDPlus1 = 0;
t.m_iOffset = iDoclistOffset;
t.m_iBaseHitlistPos = 0;
// hint is: dDoclistSkips * ZIPPED( sizeof(int64_t) * 3 ) == dDoclistSkips * 8
m_tSkipsReader.SeekTo ( iSkipsOffset, dDoclistSkips.GetLength ()*8 );
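		// Each stored skiplist record is three zipped deltas; the implied minimums
		// are added back below: every block advances by at least m_iSkiplistBlockSize
		// rowids and at least 4*m_iSkiplistBlockSize bytes of doclist, so only the
		// excess over those minimums is actually encoded.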
int i = 0;
while ( ++i<dDoclistSkips.GetLength() )
{
const SkiplistEntry_t & r = dDoclistSkips[i];
RowID_t tRowIDDelta = m_tSkipsReader.UnzipRowid();
uint64_t uOff = m_tSkipsReader.UnzipOffset();
uint64_t uPosDelta = m_tSkipsReader.UnzipOffset();
if ( m_tSkipsReader.GetErrorFlag () )
{
m_tReporter.Fail ( "skiplist reading error (wordid=" UINT64_FMT "(%s), exp=%d, got=%d, error='%s')", UINT64 ( uWordid ), sWord, i, dDoclistSkips.GetLength (), m_tSkipsReader.GetErrorMessage ().cstr () );
m_tSkipsReader.ResetError();
break;
}
t.m_tBaseRowIDPlus1 += tIndexSettings.m_iSkiplistBlockSize + tRowIDDelta;
t.m_iOffset += 4*tIndexSettings.m_iSkiplistBlockSize + uOff;
t.m_iBaseHitlistPos += uPosDelta;
if ( t.m_tBaseRowIDPlus1!=r.m_tBaseRowIDPlus1 || t.m_iOffset!=r.m_iOffset || t.m_iBaseHitlistPos!=r.m_iBaseHitlistPos )
{
m_tReporter.Fail ( "skiplist entry %d mismatch (wordid=" UINT64_FMT "(%s), exp={%u, " UINT64_FMT ", " UINT64_FMT "}, got={%u, " UINT64_FMT ", " UINT64_FMT "})",
i, UINT64 ( uWordid ), sWord,
r.m_tBaseRowIDPlus1, UINT64 ( r.m_iOffset ), UINT64 ( r.m_iBaseHitlistPos ),
t.m_tBaseRowIDPlus1, UINT64 ( t.m_iOffset ), UINT64 ( t.m_iBaseHitlistPos ) );
break;
}
}
break;
}
// move my reader instance forward too
m_pDocsReader->SeekTo ( pQword->m_rdDoclist->GetPos() );
// cleanup
SafeDelete ( pQword );
// progress bar
if ( (++iWordsChecked)%1000==0 )
m_tReporter.Progress ( "%d/%d", iWordsChecked, iWordsTotal );
}
}
void DiskIndexChecker_c::Impl_c::CheckAttributes()
{
if ( !m_tSchema.HasNonColumnarAttrs() )
return;
const int64_t iMinMaxStart = sizeof(DWORD) * m_iMinMaxIndex;
const int64_t iMinMaxEnd = sizeof(DWORD) * m_iMinMaxIndex + sizeof(DWORD) * ( m_iDocinfoIndex+1 ) * m_tSchema.GetRowSize() * 2;
const int64_t iMinMaxBytes = iMinMaxEnd - iMinMaxStart;
FileDebugCheckReader_c tAttrReader ( &m_tAttrReader );
FileDebugCheckReader_c tBlobReader ( m_bHasBlobs ? &m_tBlobReader : nullptr );
// common code with RT index
DebugCheck_Attributes ( tAttrReader, tBlobReader, m_iNumRows, iMinMaxBytes, m_tSchema, m_tReporter );
}
void DiskIndexChecker_c::Impl_c::CheckKillList() const
{
m_tReporter.Msg ( "checking kill-list..." );
CSphString sSPK = GetFilename(SPH_EXT_SPK);
if ( !sphIsReadable ( sSPK.cstr() ) )
return;
CSphString sError;
CSphAutoreader tReader;
if ( !tReader.Open ( sSPK.cstr(), sError ) )
{
m_tReporter.Fail ( "unable to open kill-list: %s", sError.cstr() );
return;
}
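	// .spk layout, as read below: a DWORD count of target tables, then per target
	// a table name (string) plus a flags DWORD (USE_KLIST/USE_DOCIDS); then a DWORD
	// kill count followed by ascending zipped docid deltas.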
DWORD nIndexes = tReader.GetDword();
for ( int i = 0; i < (int)nIndexes; i++ )
{
CSphString sIndex = tReader.GetString();
if ( tReader.GetErrorFlag() )
{
m_tReporter.Fail ( "error reading table name from kill-list: %s", tReader.GetErrorMessage().cstr() );
return;
}
DWORD uFlags = tReader.GetDword();
DWORD uMask = KillListTarget_t::USE_KLIST | KillListTarget_t::USE_DOCIDS;
if ( uFlags & (~uMask) )
{
m_tReporter.Fail ( "unknown table flags in kill-list: %u", uMask );
return;
}
}
DWORD nKills = tReader.GetDword();
if ( tReader.GetErrorFlag() )
{
m_tReporter.Fail ( "error reading kill-list" );
return;
}
for ( DWORD i = 0; i<nKills; i++ )
{
DocID_t tDelta = tReader.UnzipOffset();
if ( tDelta<=0 )
{
m_tReporter.Fail ( "descending docids found in kill-list" );
return;
}
if ( tReader.GetErrorFlag() )
{
m_tReporter.Fail ( "error docids from kill-list" );
return;
}
}
}
void DiskIndexChecker_c::Impl_c::CheckBlockIndex()
{
if ( !m_tSchema.HasNonColumnarAttrs() )
return;
m_tReporter.Msg ( "checking attribute blocks index..." );
int64_t iAllRowsTotal = m_iNumRows + (m_iDocinfoIndex+1)*2;
DWORD uStride = m_tSchema.GetRowSize();
int64_t iLoadedRowItems = m_tAttrReader.GetFilesize() / sizeof(CSphRowitem);
if ( iAllRowsTotal*uStride>iLoadedRowItems && m_iNumRows )
m_tReporter.Fail ( "rowitems count mismatch (expected=" INT64_FMT ", loaded=" INT64_FMT ")", iAllRowsTotal*uStride, iLoadedRowItems );
// check size
const int64_t iTempDocinfoIndex = ( m_iNumRows+DOCINFO_INDEX_FREQ-1 ) / DOCINFO_INDEX_FREQ;
if ( iTempDocinfoIndex!=m_iDocinfoIndex )
m_tReporter.Fail ( "block count differs (expected=" INT64_FMT ", got=" INT64_FMT ")", iTempDocinfoIndex, m_iDocinfoIndex );
CSphFixedVector<CSphRowitem> dRow ( m_tSchema.GetRowSize() );
const CSphRowitem * pRow = dRow.Begin();
m_tAttrReader.SeekTo ( 0, (int) dRow.GetLengthBytes() );
const int64_t iMinMaxEnd = sizeof(DWORD) * m_iMinMaxIndex + sizeof(DWORD) * ( m_iDocinfoIndex+1 ) * uStride * 2;
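	// Block index layout in .spa, as scanned below: after the rows, at DWORD offset
	// m_iMinMaxIndex, there are (m_iDocinfoIndex+1) pairs of (min row, max row),
	// each row uStride DWORDs wide - one pair per DOCINFO_INDEX_FREQ rows, plus one
	// extra, table-wide pair.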
CSphFixedVector<DWORD> dMinMax ( uStride*2 );
const DWORD * pMinEntry = dMinMax.Begin();
const DWORD * pMinAttrs = pMinEntry;
const DWORD * pMaxAttrs = pMinAttrs + uStride;
for ( int64_t iIndexEntry=0; iIndexEntry<m_iNumRows; iIndexEntry++ )
{
const int64_t iBlock = iIndexEntry / DOCINFO_INDEX_FREQ;
		// some checks must run at block borders, e.g. when moving from the 1st to the 2nd block
const int64_t iPrevEntryBlock = ( iIndexEntry-1 )/DOCINFO_INDEX_FREQ;
const bool bIsBordersCheckTime = ( iPrevEntryBlock!=iBlock );
if ( bIsBordersCheckTime || iIndexEntry==0 )
{
int64_t iPos = m_tAttrReader.GetPos();
int64_t iBlockPos = sizeof(DWORD) * m_iMinMaxIndex + sizeof(DWORD) * iBlock * uStride * 2;
// check docid vs global range
if ( int64_t( iBlockPos + sizeof(DWORD) * uStride) > iMinMaxEnd )
m_tReporter.Fail ( "unexpected block index end (row=" INT64_FMT ", block=" INT64_FMT ")", iIndexEntry, iBlock );
m_tAttrReader.SeekTo ( iBlockPos, (int) dMinMax.GetLengthBytes() );
m_tAttrReader.GetBytes ( dMinMax.Begin(), (int) dMinMax.GetLengthBytes() );
if ( m_tAttrReader.GetErrorFlag() )
m_tReporter.Fail ( "unexpected block index (row=" INT64_FMT ", block=" INT64_FMT ")", iIndexEntry, iBlock );
m_tAttrReader.SeekTo ( iPos, (int) dRow.GetLengthBytes() );
}
m_tAttrReader.GetBytes ( dRow.Begin(), (int) dRow.GetLengthBytes() );
const DocID_t tDocID = sphGetDocID(pRow);
// check values vs blocks range
for ( int iItem=0; iItem < m_tSchema.GetAttrsCount(); iItem++ )
{
const CSphColumnInfo & tCol = m_tSchema.GetAttr(iItem);
if ( tCol.m_sName==sphGetBlobLocatorName() || tCol.IsColumnar() )
continue;
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
case SPH_ATTR_BIGINT:
{
const SphAttr_t uVal = sphGetRowAttr ( pRow, tCol.m_tLocator );
const SphAttr_t uMin = sphGetRowAttr ( pMinAttrs, tCol.m_tLocator );
const SphAttr_t uMax = sphGetRowAttr ( pMaxAttrs, tCol.m_tLocator );
					// check that the attribute min/max range is valid
if ( uMin > uMax && bIsBordersCheckTime )
m_tReporter.Fail ( "invalid attribute range (row=" INT64_FMT ", block=" INT64_FMT ", min=" INT64_FMT ", max=" INT64_FMT ")", iIndexEntry, iBlock, uMin, uMax );
if ( uVal < uMin || uVal > uMax )
m_tReporter.Fail ( "unexpected attribute value (row=" INT64_FMT ", attr=%u, docid=" INT64_FMT ", block=" INT64_FMT ", value=0x" UINT64_FMT ", min=0x" UINT64_FMT ", max=0x" UINT64_FMT ")",
iIndexEntry, iItem, tDocID, iBlock, uint64_t(uVal), uint64_t(uMin), uint64_t(uMax) );
}
break;
case SPH_ATTR_FLOAT:
{
const float fVal = sphDW2F ( (DWORD)sphGetRowAttr ( pRow, tCol.m_tLocator ) );
const float fMin = sphDW2F ( (DWORD)sphGetRowAttr ( pMinAttrs, tCol.m_tLocator ) );
const float fMax = sphDW2F ( (DWORD)sphGetRowAttr ( pMaxAttrs, tCol.m_tLocator ) );
					// check that the attribute min/max range is valid
if ( fMin > fMax && bIsBordersCheckTime )
m_tReporter.Fail ( "invalid attribute range (row=" INT64_FMT ", block=" INT64_FMT ", min=%f, max=%f)", iIndexEntry, iBlock, fMin, fMax );
if ( fVal < fMin || fVal > fMax )
m_tReporter.Fail ( "unexpected attribute value (row=" INT64_FMT ", attr=%u, docid=" INT64_FMT ", block=" INT64_FMT ", value=%f, min=%f, max=%f)", iIndexEntry, iItem, tDocID, iBlock, fVal, fMin, fMax );
}
break;
default:
break;
}
}
// progress bar
if ( iIndexEntry%1000==0 )
m_tReporter.Progress ( INT64_FMT"/" INT64_FMT, iIndexEntry, m_iNumRows );
}
}
void DiskIndexChecker_c::Impl_c::CheckColumnar()
{
if ( !m_tSchema.HasColumnarAttrs() )
return;
m_tReporter.Msg ( "checking columnar storage..." );
CheckColumnarStorage ( GetFilename(SPH_EXT_SPC), (DWORD)m_iNumRows,
[this]( const char * szError ){ m_tReporter.Fail ( "\n%s", szError ); },
[this]( const char * szProgress ){ m_tReporter.Progress ( "%s", szProgress ); } );
}
void DiskIndexChecker_c::Impl_c::CheckDocidLookup()
{
CSphString sError;
m_tReporter.Msg ( "checking doc-id lookup..." );
CSphAutoreader tLookup;
if ( !tLookup.Open ( GetFilename(SPH_EXT_SPT), sError ) )
{
		// fail only if the table is not empty
		if ( m_iNumRows )
			m_tReporter.Fail ( "unable to open lookup file: %s", sError.cstr() );
return;
}
int64_t iLookupEnd = tLookup.GetFilesize();
const CSphColumnInfo * pId = m_tSchema.GetAttr("id");
assert(pId);
CSphFixedVector<CSphRowitem> dRow ( m_tSchema.GetRowSize() );
m_tAttrReader.SeekTo ( 0, (int) dRow.GetLengthBytes() );
CSphBitvec dRowids ( (int)m_iNumRows );
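	// .spt layout, as read below: DWORD doc count, DWORD docs-per-checkpoint,
	// OFFSET max docid, then an array of checkpoints (base docid, payload offset);
	// each payload stores the rowid of its base docid first, followed by
	// (zipped docid delta, DWORD rowid) pairs for the remaining docs.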
int iDocs = tLookup.GetDword();
int iDocsPerCheckpoint = tLookup.GetDword();
tLookup.GetOffset(); // max docid
int64_t iLookupBase = tLookup.GetPos();
int iCheckpoints = ( iDocs + iDocsPerCheckpoint - 1 ) / iDocsPerCheckpoint;
DocidLookupCheckpoint_t tCp;
DocID_t tLastDocID = 0;
int iCp = 0;
while ( tLookup.GetPos()<iLookupEnd && iCp<iCheckpoints )
{
tLookup.SeekTo ( sizeof(DocidLookupCheckpoint_t) * iCp + iLookupBase, sizeof(DocidLookupCheckpoint_t) );
DocidLookupCheckpoint_t tPrevCp = tCp;
tCp.m_tBaseDocID = tLookup.GetOffset();
tCp.m_tOffset = tLookup.GetOffset();
tLastDocID = tCp.m_tBaseDocID;
if ( (uint64_t)tPrevCp.m_tBaseDocID>=(uint64_t)tCp.m_tBaseDocID )
m_tReporter.Fail ( "descending docid at checkpoint %d, previous docid " UINT64_FMT " docid " UINT64_FMT, iCp, tPrevCp.m_tBaseDocID, tCp.m_tBaseDocID );
tLookup.SeekTo ( tCp.m_tOffset, sizeof(DWORD) * 3 * iDocsPerCheckpoint );
int iCpDocs = iDocsPerCheckpoint;
// last checkpoint might have fewer docs
if ( iCp==iCheckpoints-1 )
{
			int iLeftover = ( iDocs % iDocsPerCheckpoint );
			iCpDocs = ( iLeftover ? iLeftover : iDocsPerCheckpoint );
}
for ( int i=0; i<iCpDocs; i++ )
{
uint64_t tDelta = 0;
DocID_t tDocID = 0;
RowID_t tRowID = INVALID_ROWID;
if ( !( i % iCpDocs ) )
{
tDocID = tLastDocID;
tRowID = tLookup.GetDword();
} else
{
tDelta = tLookup.UnzipOffset();
tRowID = tLookup.GetDword();
if ( tDelta==0 )
m_tReporter.Fail ( "invalid docid delta " UINT64_FMT " at row %u, checkpoint %d, doc %d, last docid " UINT64_FMT, tDocID, tRowID, iCp, i, tLastDocID );
else
tDocID = tLastDocID + tDelta;
}
if ( tRowID>=m_iNumRows )
m_tReporter.Fail ( "rowid %u out of bounds " INT64_FMT, tRowID, m_iNumRows );
else if ( !pId->IsColumnar() )
{
// read only docid
m_tAttrReader.SeekTo ( dRow.GetLengthBytes() * tRowID, sizeof(DocID_t) );
m_tAttrReader.GetBytes ( dRow.Begin(), sizeof(DocID_t) );
if ( dRowids.BitGet ( tRowID ) )
m_tReporter.Fail ( "row %u already mapped, current docid" INT64_FMT " checkpoint %d, doc %d", tRowID, INT64_FMT, iCp, i );
dRowids.BitSet ( tRowID );
if ( tDocID!=sphGetDocID ( dRow.Begin() ) )
m_tReporter.Fail ( "invalid docid " UINT64_FMT "(" UINT64_FMT ") at row %u, checkpoint %d, doc %d, last docid " UINT64_FMT,
tDocID, sphGetDocID ( dRow.Begin() ), tRowID, iCp, i, tLastDocID );
}
tLastDocID = tDocID;
}
iCp++;
}
if ( !pId->IsColumnar() )
{
for ( int i=0; i<m_iNumRows; i++ )
{
if ( dRowids.BitGet ( i ) )
continue;
m_tAttrReader.SeekTo ( dRow.GetLengthBytes() * i, sizeof(DocID_t) );
m_tAttrReader.GetBytes ( dRow.Begin(), sizeof(DocID_t) );
DocID_t tDocID = sphGetDocID ( dRow.Begin() );
m_tReporter.Fail ( "row %u(" INT64_FMT ") not mapped at lookup, docid " UINT64_FMT, i, m_iNumRows, tDocID );
}
}
}
RowID_t DiskIndexChecker_c::Impl_c::GetRowidByDocid ( DocID_t iDocID ) const
{
CSphMappedBuffer<BYTE> tDocidLookup;
CSphString sLastError;
if ( !tDocidLookup.Setup ( GetFilename ( SPH_EXT_SPT ), sLastError, false ) )
return INVALID_ROWID;
LookupReader_c tLookupReader;
tLookupReader.SetData ( tDocidLookup.GetReadPtr() );
return tLookupReader.Find(iDocID);
}
RowID_t DiskIndexChecker_c::Impl_c::CheckIfKilled ( RowID_t iRowID ) const
{
DeadRowMap_Disk_c tDeadRowMap;
CSphString sError;
tDeadRowMap.Prealloc ( (DWORD)m_iNumRows, GetFilename ( SPH_EXT_SPM ), sError );
if ( tDeadRowMap.IsSet ( iRowID ) )
iRowID = INVALID_ROWID;
return iRowID;
}
struct DocRow_fn
{
	inline static bool IsLess ( const DocidRowidPair_t & tA, const DocidRowidPair_t & tB )
{
if ( tA.m_tDocID==tB.m_tDocID && tA.m_tRowID<tB.m_tRowID )
return true;
return ( tA.m_tDocID<tB.m_tDocID );
}
};
void DiskIndexChecker_c::Impl_c::CheckDocids()
{
CSphString sError;
m_tReporter.Msg ( "checking docid douplicates ..." );
CSphFixedVector<CSphRowitem> dRow ( m_tSchema.GetRowSize() );
m_tAttrReader.SeekTo ( 0, (int) dRow.GetLengthBytes() );
CSphFixedVector<DocidRowidPair_t> dRows ( m_iNumRows );
for ( int i=0; i<m_iNumRows; i++ )
{
m_tAttrReader.SeekTo ( dRow.GetLengthBytes() * i, sizeof(DocID_t) );
m_tAttrReader.GetBytes ( dRow.Begin(), sizeof(DocID_t) );
dRows[i].m_tRowID = i;
dRows[i].m_tDocID = sphGetDocID ( dRow.Begin() );
}
dRows.Sort ( DocRow_fn() );
for ( int i=1; i<dRows.GetLength(); i++ )
{
if ( dRows[i].m_tDocID==dRows[i-1].m_tDocID )
m_tReporter.Fail ( "duplicate of docid " INT64_FMT " found at rows %u %u", dRows[i].m_tDocID, dRows[i-1].m_tRowID, dRows[i].m_tRowID );
}
}
void DiskIndexChecker_c::Impl_c::CheckDocstore()
{
if ( !m_bHasDocstore )
return;
m_tReporter.Msg ( "checking docstore..." );
::CheckDocstore ( m_tDocstoreReader, m_tReporter, m_iNumRows );
}
CSphString DiskIndexChecker_c::Impl_c::GetFilename ( ESphExt eExt ) const
{
return m_tIndex.GetFilename ( eExt );
}
/// public interface
DiskIndexChecker_c::DiskIndexChecker_c ( CSphIndex& tIndex, DebugCheckError_i& tReporter )
: m_pImpl { std::make_unique<Impl_c> ( tIndex, tReporter ) }
{}
DiskIndexChecker_c::~DiskIndexChecker_c() = default;
bool DiskIndexChecker_c::OpenFiles ()
{
return m_pImpl->OpenFiles();
}
void DiskIndexChecker_c::Setup ( int64_t iNumRows, int64_t iDocinfoIndex, int64_t iMinMaxIndex, bool bCheckIdDups )
{
m_pImpl->Setup (iNumRows, iDocinfoIndex, iMinMaxIndex, bCheckIdDups );
}
CSphVector<SphWordID_t> & DiskIndexChecker_c::GetHitlessWords()
{
return m_pImpl->GetHitlessWords();
}
void DiskIndexChecker_c::Check()
{
m_pImpl->Check();
}
struct ColumnNameCmp_fn
{
inline bool IsLess ( const CSphColumnInfo & tColA, const CSphColumnInfo & tColB ) const
{
return ( strcasecmp ( tColA.m_sName.cstr(), tColB.m_sName.cstr() )<0 );
}
};
static CSphString DumpAttr ( const CSphColumnInfo & tCol )
{
CSphString sRes;
if ( tCol.m_tLocator.IsBlobAttr() )
sRes.SetSprintf ( "%s at blob@%d", sphTypeName ( tCol.m_eAttrType ), tCol.m_tLocator.m_iBlobAttrId );
else
sRes.SetSprintf ( "%s at %d@%d", sphTypeName ( tCol.m_eAttrType ), tCol.m_tLocator.m_iBitCount, tCol.m_tLocator.m_iBitOffset );
return sRes;
}
template <typename T>
void DebugCheckSchema_T ( const ISphSchema & tSchema, T & tReporter )
{
// check duplicated names
CSphVector<CSphColumnInfo> dAttrs;
dAttrs.Reserve ( tSchema.GetAttrsCount() );
for ( int iAttr=0; iAttr<tSchema.GetAttrsCount(); iAttr++ )
dAttrs.Add ( tSchema.GetAttr ( iAttr ) );
dAttrs.Sort ( ColumnNameCmp_fn() );
for ( int iAttr=1; iAttr<dAttrs.GetLength(); iAttr++ )
{
const CSphColumnInfo & tPrev = dAttrs[iAttr-1];
const CSphColumnInfo & tCur = dAttrs[iAttr];
if ( strcasecmp ( tPrev.m_sName.cstr(), tCur.m_sName.cstr() )==0 )
tReporter.Fail ( "duplicate attributes name %s for columns: %s, %s", tCur.m_sName.cstr(), DumpAttr ( tPrev ).cstr(), DumpAttr ( tCur ).cstr() );
}
}
void DiskIndexChecker_c::Impl_c::CheckSchema()
{
m_tReporter.Msg ( "checking schema..." );
DebugCheckSchema_T ( m_tSchema, m_tReporter );
}
struct StringReporter_t
{
StringBuilder_c m_sErrors;
void Fail ( const char * szFmt, ... )
{
va_list ap;
va_start ( ap, szFmt );
m_sErrors.vAppendf ( szFmt, ap );
va_end ( ap );
}
};
bool DebugCheckSchema ( const ISphSchema & tSchema, CSphString & sError )
{
StringReporter_t tRes;
DebugCheckSchema_T ( tSchema, tRes );
if ( !tRes.m_sErrors.IsEmpty() )
{
sError = tRes.m_sErrors.cstr();
return false;
} else
{
return true;
}
}
void DebugCheckSchema ( const ISphSchema & tSchema, DebugCheckError_i & tReporter )
{
DebugCheckSchema_T ( tSchema, tReporter );
}
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/global_idf.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "global_idf.h"
#include "sphinxint.h"
#include "fileutils.h"
#include <math.h>
#pragma pack(push, 4)
struct IDFWord_t
{
uint64_t m_uWordID;
DWORD m_iDocs;
};
#pragma pack(pop)
STATIC_SIZE_ASSERT ( IDFWord_t, 12 );
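// pack(4) keeps the 8-byte wordid plus the 4-byte doc count at exactly 12 bytes
// with no tail padding, so .idf records can be read as a plain array of
// IDFWord_t; the assert above guards against layout drift.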
static const int HASH_BITS = 16;
using namespace sph;
/// global IDF
class CSphGlobalIDF final : public IDFer_c
{
protected:
~CSphGlobalIDF() final = default;
public:
bool Touch ( const CSphString& sFilename );
bool Preread ( const CSphString& sFilename, CSphString& sError );
float GetIDF ( const CSphString& sWord, int64_t iDocsLocal, bool bPlainIDF ) const final;
private:
DWORD GetDocs ( const CSphString& sWord ) const;
int64_t m_iTotalDocuments = 0;
int64_t m_iTotalWords = 0;
SphOffset_t m_uMTime = 0;
CSphLargeBuffer<IDFWord_t> m_pWords;
CSphLargeBuffer<int64_t> m_pHash;
};
using CSphGlobalIDFRefPtr_c = CSphRefcountedPtr<CSphGlobalIDF>;
// check if backend file was modified
bool CSphGlobalIDF::Touch ( const CSphString& sFilename )
{
// update m_uMTime, return true if modified
struct_stat tStat = { 0 };
if ( stat ( sFilename.cstr (), &tStat )<0 )
tStat.st_mtime = 0;
bool bModified = ( m_uMTime!=tStat.st_mtime );
m_uMTime = tStat.st_mtime;
return bModified;
}
bool CSphGlobalIDF::Preread ( const CSphString& sFilename, CSphString& sError )
{
Touch ( sFilename );
CSphAutofile tFile;
if ( tFile.Open ( sFilename, SPH_O_READ, sError )<0 )
return false;
const SphOffset_t iSize = sphGetFileSize ( tFile.GetFD (), nullptr ) - sizeof ( SphOffset_t );
sphReadThrottled ( tFile.GetFD (), &m_iTotalDocuments, sizeof ( SphOffset_t ));
m_iTotalWords = iSize / sizeof ( IDFWord_t );
// allocate words cache
if ( !m_pWords.Alloc ( m_iTotalWords, sError ))
return false;
// allocate lookup table if needed
int iHashSize = ( int ) ( U64C( 1 ) << HASH_BITS );
if ( m_iTotalWords>iHashSize * 8 )
{
if ( !m_pHash.Alloc ( iHashSize + 2, sError ))
return false;
}
// read file into memory (may exceed 2GB)
int64_t iRead = sphReadThrottled ( tFile.GetFD (), m_pWords.GetWritePtr (), iSize );
if ( iRead!=iSize )
return false;
if ( sphInterrupted ())
return false;
// build lookup table
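	// pHash[0] stores the shift that squeezes the whole wordid range into
	// 2^HASH_BITS buckets; for bucket h, pHash[h+1]..pHash[h+2]-1 then bounds the
	// words whose shifted (wordid - first) falls into h, so GetDocs() only has to
	// binary-search that slice.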
if ( m_pHash.GetLengthBytes ())
{
int64_t* pHash = m_pHash.GetWritePtr ();
uint64_t uFirst = m_pWords[0].m_uWordID;
uint64_t uRange = m_pWords[m_iTotalWords - 1].m_uWordID - uFirst;
DWORD iShift = 0;
while ( uRange>=( U64C( 1 ) << HASH_BITS ))
{
iShift++;
uRange >>= 1;
}
pHash[0] = iShift;
pHash[1] = 0;
DWORD uLastHash = 0;
for ( int64_t i = 1; i<m_iTotalWords; i++ )
{
// check for interrupt (throttled for speed)
if (( i & 0xffff )==0 && sphInterrupted ())
return false;
auto uHash = ( DWORD ) (( m_pWords[i].m_uWordID - uFirst ) >> iShift );
if ( uHash==uLastHash )
continue;
while ( uLastHash<uHash )
pHash[++uLastHash + 1] = i;
uLastHash = uHash;
}
pHash[++uLastHash + 1] = m_iTotalWords;
}
return true;
}
DWORD CSphGlobalIDF::GetDocs ( const CSphString& sWord ) const
{
const char* s = sWord.cstr ();
// replace = to MAGIC_WORD_HEAD_NONSTEMMED for exact terms
char sBuf[3 * SPH_MAX_WORD_LEN + 4];
if ( s && *s=='=' )
{
		strncpy ( sBuf, s, sizeof ( sBuf ) - 1 );
		sBuf [ sizeof ( sBuf ) - 1 ] = '\0'; // strncpy() does not terminate on truncation
		sBuf[0] = MAGIC_WORD_HEAD_NONSTEMMED;
s = sBuf;
}
uint64_t uWordID = sphFNV64 ( s );
int64_t iStart = 0;
int64_t iEnd = m_iTotalWords - 1;
auto pWords = (const IDFWord_t*)m_pWords.GetReadPtr();
if ( m_pHash.GetLengthBytes ())
{
uint64_t uFirst = pWords[0].m_uWordID;
auto uHash = ( DWORD ) (( uWordID - uFirst ) >> m_pHash[0] );
		if ( uHash>=( U64C( 1 ) << HASH_BITS ))
return 0;
iStart = m_pHash[uHash + 1];
iEnd = m_pHash[uHash + 2] - 1;
}
const IDFWord_t* pWord = sphBinarySearch ( pWords + iStart, pWords + iEnd,
bind ( &IDFWord_t::m_uWordID ), uWordID );
return pWord ? pWord->m_iDocs : 0;
}
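// IDF as computed below, with n = max(local docs, global docs) and N = total
// documents clamped to N>=n:
//   normalized: log(N/n) / (2*log(N+1))
//   plain:      log((N-n+1)/n) / (2*log(N-n+2)), which can go negative for
//               words present in most documents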
float CSphGlobalIDF::GetIDF ( const CSphString& sWord, int64_t iDocsLocal, bool bPlainIDF ) const
{
int64_t iDocs = Max ( iDocsLocal, ( int64_t ) GetDocs ( sWord ));
int64_t iTotalClamped = Max ( m_iTotalDocuments, iDocs );
if ( !iDocs )
return 0.0f;
if ( bPlainIDF )
iTotalClamped += 1-iDocs;
float fLogTotal = logf ( float ( 1 + iTotalClamped ));
return logf ( float ( iTotalClamped ) / float ( iDocs )) / ( 2 * fLogTotal );
}
/// global idf definitions hash
class cGlobalIDF
{
mutable RwLock_t m_tLock;
SmallStringHash_T<CSphGlobalIDFRefPtr_c> m_hIDFs GUARDED_BY ( m_tLock );
public:
bool LoadGlobalIDF ( const CSphString& sPath, CSphString& sError );
bool ReloadGlobalIDF ( const CSphString& sPath, CSphString& sError );
CSphGlobalIDFRefPtr_c* GetIDF ( const CSphString& sPath );
StrVec_t Collect() const;
void DeleteMany ( const StrVec_t& dFiles );
void Clear ();
};
cGlobalIDF& GetGlobalIDF()
{
static cGlobalIDF tIDF;
return tIDF;
}
static CSphGlobalIDFRefPtr_c DoPrereadIDF ( const CSphString& sPath, CSphString& sError )
{
CSphGlobalIDFRefPtr_c pNewIDF { new CSphGlobalIDF };
if ( !pNewIDF->Preread ( sPath, sError ))
pNewIDF = nullptr;
return pNewIDF;
}
bool cGlobalIDF::LoadGlobalIDF ( const CSphString& sPath, CSphString& sError )
{
sphLogDebug ( "Loading global IDF (%s)", sPath.cstr ());
auto pGlobalIDF = DoPrereadIDF ( sPath, sError );
if ( !pGlobalIDF )
return false;
ScWL_t wLock ( m_tLock );
m_hIDFs.Add ( std::move (pGlobalIDF), sPath );
return true;
}
bool cGlobalIDF::ReloadGlobalIDF ( const CSphString& sPath, CSphString& sError )
{
sphLogDebug ( "Reloading global IDF (%s)", sPath.cstr ());
auto pGlobalIDF = DoPrereadIDF ( sPath, sError );
if ( !pGlobalIDF )
return false;
ScWL_t wLock ( m_tLock );
auto* ppGlobalIDF = m_hIDFs ( sPath );
if ( ppGlobalIDF )
*ppGlobalIDF = std::exchange ( pGlobalIDF, nullptr );
return true;
}
CSphGlobalIDFRefPtr_c* cGlobalIDF::GetIDF ( const CSphString& sPath )
{
ScRL_t RLock ( m_tLock );
return m_hIDFs ( sPath );
}
StrVec_t cGlobalIDF::Collect() const
{
StrVec_t dCollection;
ScRL_t rLock ( m_tLock );
for ( auto& dIdf : m_hIDFs )
dCollection.Add ( dIdf.first );
return dCollection;
}
void cGlobalIDF::DeleteMany ( const StrVec_t& dFiles )
{
ScWL_t wLock ( m_tLock );
for ( const auto& sKey : dFiles )
{
sphLogDebug ( "Unloading global IDF (%s)", sKey.cstr() );
m_hIDFs.Delete ( sKey );
}
}
void cGlobalIDF::Clear()
{
ScWL_t wLock ( m_tLock );
m_hIDFs.Reset();
}
bool sph::PrereadGlobalIDF ( const CSphString& sPath, CSphString& sError )
{
auto& tGlobalIDF = GetGlobalIDF();
auto* ppGlobalIDF = tGlobalIDF.GetIDF(sPath);
if ( !ppGlobalIDF )
return tGlobalIDF.LoadGlobalIDF ( sPath, sError );
auto& pGlobalIDF = *ppGlobalIDF;
if ( pGlobalIDF && pGlobalIDF->Touch ( sPath ))
return tGlobalIDF.ReloadGlobalIDF ( sPath, sError );
return true;
}
static StrVec_t CollectUnlistedIn ( const StrVec_t& dFiles )
{
StrVec_t dAllIDFs = GetGlobalIDF().Collect();
StrVec_t dCollection;
for ( const auto& sIdf : dAllIDFs )
if ( !dFiles.Contains ( sIdf ) )
dCollection.Add ( sIdf );
return dCollection;
}
static void DeleteUnlistedIn ( const StrVec_t& dFiles )
{
auto dUnlisted = CollectUnlistedIn ( dFiles );
GetGlobalIDF().DeleteMany(dUnlisted);
}
void sph::UpdateGlobalIDFs ( const StrVec_t& dFiles )
{
// delete unlisted entries
DeleteUnlistedIn ( dFiles );
// load/rotate remaining entries
CSphString sError;
ARRAY_FOREACH ( i, dFiles )
{
const auto& sPath = dFiles[i];
if ( !PrereadGlobalIDF ( sPath, sError ))
sphLogDebug ( "Could not load global IDF (%s): %s", sPath.cstr (), sError.cstr ());
}
}
void sph::ShutdownGlobalIDFs ()
{
StrVec_t dAllIDFs = GetGlobalIDF().Collect();
GetGlobalIDF().DeleteMany ( dAllIDFs );
}
IDFerRefPtr_c sph::GetIDFer ( const CSphString& IDFPath )
{
IDFerRefPtr_c pResult;
auto* ppGlobalIDF = GetGlobalIDF().GetIDF ( IDFPath );
if ( ppGlobalIDF )
pResult = *ppGlobalIDF;
return pResult;
}
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/threadutils.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "threadutils.h"
#include <boost/context/detail/prefetch.hpp>
#include <optional>
#include "datetime.h"
#if !_WIN32
// UNIX-specific headers and calls
#include <sys/syscall.h>
#include <signal.h>
#endif
// for thr_self()
#ifdef __FreeBSD__
#include <sys/thr.h>
#endif
using namespace Threads;
const char* TaskStateName ( TaskState_e eState )
{
switch (eState)
{
case TaskState_e::UNKNOWN: return "-";
case TaskState_e::HANDSHAKE: return "handshake";
case TaskState_e::NET_READ: return "net_read";
case TaskState_e::NET_WRITE: return "net_write";
case TaskState_e::QUERY: return "query";
case TaskState_e::NET_IDLE: return "net_idle";
case TaskState_e::RETIRED: return "- retired";
}
return "unknown";
}
const char* ProtoName ( Proto_e eProto )
{
switch ( eProto )
{
case Proto_e::UNKNOWN: return "-";
case Proto_e::SPHINX:
case Proto_e::SPHINXSE: return "sphinx";
case Proto_e::MYSQL41: return "mysql";
case Proto_e::HTTP: return "http";
case Proto_e::HTTPS: return "https";
case Proto_e::REPLICATION: return "replication";
default: break;
}
return "unknown";
}
const char * RelaxedProtoName ( Proto_e eProto )
{
switch ( eProto )
{
case Proto_e::UNKNOWN: return "-";
case Proto_e::MYSQL41: return "mysql";
case Proto_e::REPLICATION: return "replication";
case Proto_e::SPHINX:
case Proto_e::HTTP: return "sphinx and http(s)";
case Proto_e::HTTPS: return "https";
case Proto_e::SPHINXSE: return "sphinx (to connect from SphinxSE)";
default: break;
}
return "unknown";
}
int GetOsThreadId ()
{
#if _WIN32
return GetCurrentThreadId();
#elif defined ( __APPLE__ )
uint64_t tid;
pthread_threadid_np(NULL, &tid);
return tid;
#elif defined(SYS_gettid)
return syscall ( SYS_gettid );
#elif defined(__FreeBSD__)
long tid;
thr_self(&tid);
return (int)tid;
#else
return 0;
#endif
}
int GetOsProcessId()
{
#if _WIN32
return GetCurrentProcessId();
#else
return getpid();
#endif
}
#include "event.h"
#include <atomic>
//////////////////////////////////////////////////////////////////////////
/// functional threadpool with minimum footprint
#define LOG_LEVEL_DEBUG false
#define LOG_LEVEL_DETAIL false
#define LOG_LEVEL_ALONE LOG_LEVEL_DETAIL
//#define LOG_LEVEL_ALONE true
namespace Threads {
namespace logdetail {
const char* name() { return MyThd ().m_sThreadName.cstr(); }
}
#define LOG_COMPONENT_MT "(" << GetOsThreadId() << ") " << logdetail::name() << ": "
using Operation_t = Threads::details::SchedulerOperation_t;
using OpSchedule_t = Threads::details::OpQueue_T<Operation_t>;
struct TaskServiceThreadInfo_t
{
OpSchedule_t m_dPrivateQueue;
long m_iPrivateOutstandingWork = 0;
};
class TaskService_t
{
public:
using operation = Operation_t;
};
// Helper class to determine whether the current thread is inside an
// invocation of Service_t::run() for a specified service object.
// It may be used to optimize code flow (e.g. to enqueue a continuation without taking locks)
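// Usage sketch: a worker thread constructs Context_c ( pService, tThreadInfo )
// on its stack before running handlers; any code reached from such a handler can
// then call CallStack_c::Contains ( pService ) to fetch the per-thread info and,
// e.g., push a continuation into the private queue without taking the service
// mutex (see post_continuation() below).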
template<typename Key, typename Value = BYTE>
class CallStack_c
{
public:
// Context class automatically pushes the key/value pair on to the stack.
class Context_c : public ISphNoncopyable
{
Key * m_pService; // The key associated with the context.
Value * m_pThisContext; // The value associated with the context.
Context_c * m_pNext; // The next element in the stack.
friend class CallStack_c<Key, Value>;
public:
// Push the key on to the stack.
explicit Context_c ( Key * pService )
: m_pService ( pService ),
m_pNext ( CallStack_c<Key, Value>::m_pTop )
{
m_pThisContext = (Value *)this;
CallStack_c<Key, Value>::m_pTop = this;
}
// Push the key/value pair on to the stack.
Context_c ( Key * pKey, Value & v )
: m_pService ( pKey ),
m_pThisContext ( &v ),
m_pNext ( CallStack_c<Key, Value>::m_pTop )
{
CallStack_c<Key, Value>::m_pTop = this;
}
// Pop the key/value pair from the stack.
~Context_c ()
{
CallStack_c<Key, Value>::m_pTop = m_pNext;
}
// Find the next context with the same key.
Value * NextByKey () const noexcept
{
for ( auto* pElem = m_pNext; pElem!=nullptr; pElem = pElem->m_pNext )
if ( pElem->m_pService==m_pService )
return pElem->m_pThisContext;
return nullptr;
}
};
friend class Context_c;
// Determine whether the specified owner is on the stack.
// Returns address of key, if present, nullptr otherwise.
static Value * Contains ( Key * pKey ) noexcept
{
for ( auto* pElem = m_pTop; pElem!=nullptr; pElem = pElem->m_pNext )
if ( pElem->m_pService==pKey )
return pElem->m_pThisContext;
return nullptr;
}
// Obtain the value at the top of the stack.
static Value * Top () noexcept
{
Context_c * pElem = m_pTop;
return pElem ? pElem->m_pThisContext : nullptr;
}
private:
// The top of the stack of calls for the current thread.
static thread_local Context_c* m_pTop;
};
template<typename Key, typename Value>
thread_local typename CallStack_c<Key, Value>::Context_c* CallStack_c<Key,Value>::m_pTop = nullptr;
//struct Service_i
//{
// virtual ~Service_i() {}
// virtual void run() = 0;
// virtual void reset() = 0;
//};
#define LOG_LEVEL_WORKS false
#define LOG_LEVEL_ST false
#define LOG_LEVEL_SERVICE LOG_LEVEL_DETAIL
//#define LOG_LEVEL_SERVICE true
#define LOG_COMPONENT_SVC LOG_COMPONENT_MT << " [" << &m_iOutstandingWork << "]=" << m_iOutstandingWork
/// performs tasks pushed with post() in one or many threads until they are done.
/// Naming convention of members is inherited from boost::asio as drop-in replacement.
struct Service_t : public TaskService_t//, public Service_i
{
std::atomic<long> m_iOutstandingWork {0}; /// count of unfinished works
mutable CSphMutex m_dMutex; /// protect access to internal data
bool m_bStopped = false; /// dispatcher has been stopped.
bool m_bOneThread; /// optimize for single-threaded use case
sph::Event_c m_tWakeupEvent; /// event to wake up blocked threads
OpSchedule_t m_OpQueue GUARDED_BY ( m_dMutex ); /// The queue of handlers that are ready to be delivered
OpSchedule_t m_OpVipQueue GUARDED_BY ( m_dMutex ); /// The queue of handlers that have to be delivered BEFORE OpQueue
// Per-thread call stack to track the state of each thread in the service.
using ThreadCallStack_c = CallStack_c<Service_t, TaskServiceThreadInfo_t>;
	class Work_c; /// Scoped RAII work to keep the service running; calls work_started()/work_finished()
friend class Work_c;
public:
explicit Service_t (bool bOneThread)
: m_bOneThread ( bOneThread ) {}
inline void post_op ( Service_t::operation* pOp) // post into secondary queue
{
post_immediate_completion ( pOp, false );
}
inline void defer_op ( Service_t::operation* pOp ) // post into primary queue
{
post_immediate_completion ( pOp, true );
}
void post_continuation ( Service_t::operation * pOp )
{
auto * pThisThread = ThreadCallStack_c::Contains ( this );
if ( pThisThread )
{
++pThisThread->m_iPrivateOutstandingWork;
pThisThread->m_dPrivateQueue.Push ( pOp );
LOG ( SERVICE, SVC ) << "post this";
return;
}
work_started ();
ScopedMutex_t dLock ( m_dMutex );
LOG ( SERVICE, SVC ) << "post";
m_OpVipQueue.Push ( pOp );
wake_one_thread_and_unlock ( dLock );
}
void post_immediate_completion ( Service_t::operation * pOp, bool bVip )
{
if ( m_bOneThread )
{
auto * pThisThread = ThreadCallStack_c::Contains ( this );
if ( pThisThread )
{
++pThisThread->m_iPrivateOutstandingWork;
pThisThread->m_dPrivateQueue.Push ( pOp );
LOG ( SERVICE, SVC ) << "post this";
return;
}
}
work_started ();
ScopedMutex_t dLock ( m_dMutex );
LOG ( SERVICE, MT ) << "post";
if ( bVip )
m_OpVipQueue.Push ( pOp );
else
m_OpQueue.Push ( pOp );
wake_one_thread_and_unlock ( dLock );
}
void run ( std::atomic<bool>& bBusy ) NO_THREAD_SAFETY_ANALYSIS //override
{
LOG ( SERVICE, SVC ) << "run " << m_iOutstandingWork << " st:" << !!m_bStopped;
if ( m_iOutstandingWork==0 )
{
LOG ( WORKS, MT ) << "run m_iOutstandingWork " << m_iOutstandingWork << " " << &m_iOutstandingWork<< " stop!";
stop();
return;
}
TaskServiceThreadInfo_t dThisThread;
dThisThread.m_iPrivateOutstandingWork = 0;
ThreadCallStack_c::Context_c dCtx ( this, dThisThread );
ScopedMutex_t dLock ( m_dMutex );
while ( do_run_one ( dLock, dThisThread, bBusy ) )
dLock.Lock ();
}
bool queue_empty() const REQUIRES ( m_dMutex )
{
return m_OpQueue.Empty () && m_OpVipQueue.Empty ();
}
inline bool do_run_one ( ScopedMutex_t& dLock, TaskServiceThreadInfo_t& this_thread, std::atomic<bool>& bBusy ) noexcept
REQUIRES ( dLock ) RELEASE ( dLock ) TRY_ACQUIRE ( false, dLock )
{
while ( !m_bStopped )
{
LOG ( SERVICE, MT ) << "locked " << dLock.Locked();
assert ( dLock.Locked ());
if ( queue_empty() )
{
m_tWakeupEvent.Clear ( dLock );
m_tWakeupEvent.Wait ( dLock );
continue;
}
auto & dOpQueue = m_OpVipQueue.Empty () ? m_OpQueue : m_OpVipQueue;
auto * pOp = dOpQueue.Front ();
dOpQueue.Pop ();
if ( !queue_empty () && !m_bOneThread )
wake_one_thread_and_unlock ( dLock );
else
dLock.Unlock ();
bBusy.store ( true, std::memory_order_relaxed );
boost::context::detail::prefetch_range ( pOp, sizeof ( Operation_t ) );
pOp->Complete (this);
bBusy.store ( false, std::memory_order_relaxed );
LOG ( SERVICE, MT ) << "completed & unlocked";
if ( this_thread.m_iPrivateOutstandingWork>1 )
{
m_iOutstandingWork += this_thread.m_iPrivateOutstandingWork-1;
LOG ( WORKS, MT ) << "do_run_one m_iOutstandingWork " << m_iOutstandingWork << " " << &m_iOutstandingWork;
}
else if ( this_thread.m_iPrivateOutstandingWork<1 )
work_finished ();
this_thread.m_iPrivateOutstandingWork = 0;
if ( !this_thread.m_dPrivateQueue.Empty ())
{
dLock.Lock ();
m_OpVipQueue.Push ( this_thread.m_dPrivateQueue );
}
return true;
}
return false;
}
void stop()
{
LOG ( SERVICE, SVC ) << "stop";
ScopedMutex_t dLock ( m_dMutex );
stop_all_threads ( dLock );
}
bool stopped () const
{
ScopedMutex_t dLock ( m_dMutex );
return m_bStopped;
}
void reset () //override
{
LOG ( DETAIL, MT ) << "reset stopped ";
ScopedMutex_t dLock ( m_dMutex );
m_bStopped = false;
}
// Notify that some work has started.
void work_started ()
{
LOG ( SERVICE, SVC ) << "work_started from " << m_iOutstandingWork;
++m_iOutstandingWork;
LOG ( WORKS, MT ) << "work_started m_iOutstandingWork " << m_iOutstandingWork << " " << &m_iOutstandingWork;
}
// Notify that some work has finished.
void work_finished ()
{
LOG ( SERVICE, SVC ) << "work_finished to " << m_iOutstandingWork-1;
if ( --m_iOutstandingWork==0 )
stop ();
LOG ( WORKS, MT ) << "work_finished m_iOutstandingWork " << m_iOutstandingWork << " " << &m_iOutstandingWork;
}
void stop_all_threads ( ScopedMutex_t & dLock ) REQUIRES ( dLock )
{
m_bStopped = true;
m_tWakeupEvent.SignalAll ( dLock );
}
void wake_one_thread_and_unlock ( ScopedMutex_t & dLock ) REQUIRES ( dLock ) RELEASE ( dLock )
{
if ( !m_tWakeupEvent.MaybeUnlockAndSignalOne ( dLock ))
dLock.Unlock ();
}
long works() const noexcept
{
return m_iOutstandingWork;
}
NTasks_t tasks() const
{
ScopedMutex_t dLock ( m_dMutex );
return { (int)m_OpVipQueue.GetLength(), (int)m_OpQueue.GetLength() };
}
};
/// helper to hold the service running
class Service_t::Work_c
{
Service_t& m_tServiceRef;
public:
explicit Work_c( Service_t& tService )
: m_tServiceRef (tService)
{
m_tServiceRef.work_started ();
}
Work_c ( const Work_c& tOther)
: m_tServiceRef ( tOther.m_tServiceRef )
{
m_tServiceRef.work_started ();
}
Work_c & operator= ( const Work_c & ) = delete;
~Work_c()
{
m_tServiceRef.work_finished();
}
};
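// A usage note: Work_c is a RAII token that keeps the service's outstanding-work
// counter above zero, so run() keeps spinning while any Work_c instance is alive.
// A minimal hedged sketch (hypothetical caller, not from this file):
//
//   Service_t tService ( false );
//   {
//     Service_t::Work_c tKeepAlive ( tService ); // calls work_started()
//     // ... post operations; the worker loop will not stop here ...
//   } // ~Work_c calls work_finished(); at zero outstanding work the service stops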
#define LOG_COMPONENT_TP LOG_COMPONENT_MT << ": "
#define LOG_COMPONENT_ST LOG_COMPONENT_MT << " strand: "
// strand - sequential scheduler. Operations are executed strictly sequentially, in FIFO order.
// It looks like a 'single thread', but the actual thread is provided by the backend and may change.
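// A minimal usage sketch (hypothetical names, illustration only; assumes a backend
// scheduler such as a thread pool already exists):
//
//   Scheduler_i * pPool = GlobalWorkPool ();   // any backend scheduler
//   Strand_c tStrand ( pPool, "mystrand" );    // serializes jobs over the pool
//   tStrand.Schedule ( []{ /* job A */ }, false );
//   tStrand.Schedule ( []{ /* job B */ }, false ); // B never overlaps A; FIFO order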
class Strand_c final : public SchedulerWithBackend_i
{
struct StrandWorker_t final : public ISphRefcountedMT
{
CSphMutex m_dMutex;
bool m_bLocked GUARDED_BY ( m_dMutex ) = false;
OpSchedule_t m_OpWaitQueue GUARDED_BY ( m_dMutex ); /// The queue for the next run
OpSchedule_t m_OpReadyQueue; /// The queue for current run
// strand has no backend thread/threadpool and works over another scheduler.
Scheduler_i* m_pBackend = nullptr;
inline bool Enqueue ( Threads::details::SchedulerOperation_t* pOp )
{
ScopedMutex_t tLock ( m_dMutex );
if ( m_bLocked )
{
m_OpWaitQueue.Push ( pOp );
LOG ( ST, ST ) << " enqueued to wait queue, was locked " << pOp;
return false;
}
m_bLocked = true;
tLock.Unlock();
m_OpReadyQueue.Push ( pOp );
LOG ( ST, ST ) << " enqueued to ready queue, locked " << pOp;
return true;
}
// forward the continuation to the backend scheduler's primary queue
void PostContinuationToBackend ( Threads::details::SchedulerOperation_t* pOp ) const
{
// Add the function to the strand and schedule the strand if required.
if ( m_pBackend )
m_pBackend->ScheduleContinuationOp ( pOp );
}
inline Keeper_t KeepWorking() const
{
assert ( m_pBackend );
return m_pBackend->KeepWorking();
}
protected:
~StrandWorker_t() final = default;
};
using StrandWorkerPtr_t = CSphRefcountedPtr<StrandWorker_t>;
StrandWorkerPtr_t m_pWorker;
const char* m_szName = nullptr;
// Per-thread call stack to track the state of each thread in the service.
using StrandCallStack_c = CallStack_c<StrandWorker_t>;
class Invoker_c
{
StrandWorkerPtr_t m_pOwner;
Keeper_t m_tParentKeeper;
public:
explicit Invoker_c ( StrandWorkerPtr_t pStrand );
Invoker_c ( const Invoker_c& rhs ) = default;
Invoker_c ( Invoker_c && rhs ) noexcept;
Invoker_c & operator= ( Invoker_c && rhs ) noexcept;
void run ();
};
friend class Invoker_c;
inline bool Enqueue ( Threads::details::SchedulerOperation_t* pOp )
{
assert ( m_pWorker );
return m_pWorker->Enqueue ( pOp );
}
void PostContinuationImpl ( Threads::details::SchedulerOperation_t* pOp ) // try to execute immediately, otherwise enqueue and schedule via the backend
{
auto bThisThread = !!StrandCallStack_c::Contains ( m_pWorker );
if ( bThisThread )
{
LOG ( ST, ST ) << "PostContinuation fast in this thread";
Threads::JobTracker_t dTrack;
// the barrier ensures that no operations before this point are reordered past it.
std::atomic_thread_fence ( std::memory_order_acquire );
pOp->Complete(pOp);
std::atomic_thread_fence ( std::memory_order_release );
LOG ( ST, ST ) << "strand PostContinuation performed without queuing";
return;
}
bool bFirst = Enqueue ( pOp );
// Add the function to the strand and schedule the strand if required.
if ( bFirst )
{
Invoker_c tInvoker { m_pWorker };
m_pWorker->PostContinuationToBackend ( Threads::details::Handler2Op ( [t = std::move ( tInvoker )]() mutable { t.run(); } ) );
}
}
public:
explicit Strand_c ( Scheduler_i* pBackend, const char* szName=nullptr )
: m_pWorker { new StrandWorker_t }
, m_szName { szName }
{
m_pWorker->m_pBackend = pBackend;
LOGINFO ( TPLIFE, TP ) << "Strand_c created";
}
void ScheduleOp ( Threads::details::SchedulerOperation_t* pOp, bool bVip ) final
{
LOG ( ST, ST ) << "Post";
bool bFirst = Enqueue ( pOp );
if ( bFirst && m_pWorker->m_pBackend )
{
LOG ( ST, ST ) << "Post scheduled invoker to backend";
Invoker_c tInvoker { m_pWorker };
m_pWorker->m_pBackend->Schedule ( [t=std::move(tInvoker)] () mutable { t.run (); }, bVip );
}
LOG ( ST, ST ) << "Post finished";
}
void ScheduleContinuationOp ( Threads::details::SchedulerOperation_t* pOp ) final
{
LOG ( ST, ST ) << "ScheduleContinuation";
PostContinuationImpl ( pOp );
LOG ( ST, ST ) << "Post finished";
}
Keeper_t KeepWorking () final
{
assert ( m_pWorker );
return m_pWorker->KeepWorking();
}
bool SetBackend ( Scheduler_i* pBackend ) final
{
assert ( m_pWorker );
ScopedMutex_t tLock ( m_pWorker->m_dMutex );
if ( m_pWorker->m_bLocked )
{
if ( m_pWorker->m_pBackend ) // everything is healthy and working; can't change right now
return false;
assert ( !m_pWorker->m_pBackend );
m_pWorker->m_pBackend = pBackend;
tLock.Unlock();
Invoker_c tInvoker { m_pWorker };
m_pWorker->PostContinuationToBackend ( Threads::details::Handler2Op ( [t = std::move ( tInvoker )]() mutable { t.run(); } ) );
return true; // the backend is already set above; skip the redundant unsynchronized assignment below
}
m_pWorker->m_pBackend = pBackend;
return true;
}
const char * Name () const final { return m_szName; }
};
Strand_c::Invoker_c::Invoker_c ( StrandWorkerPtr_t pStrand )
: m_pOwner { std::move(pStrand) }
, m_tParentKeeper { m_pOwner->KeepWorking() }
{}
Strand_c::Invoker_c::Invoker_c ( Strand_c::Invoker_c && rhs ) noexcept
: m_pOwner ( rhs.m_pOwner )
{
m_tParentKeeper.Swap ( rhs.m_tParentKeeper );
}
Strand_c::Invoker_c & Strand_c::Invoker_c::operator= ( Strand_c::Invoker_c && rhs ) noexcept
{
m_tParentKeeper.Swap ( rhs.m_tParentKeeper );
m_pOwner = rhs.m_pOwner;
return *this;
}
void Strand_c::Invoker_c::run ()
{
struct OnInvokerFinished_t
{
Strand_c::Invoker_c* m_pThis;
~OnInvokerFinished_t()
{
bool bMoreHandlers;
auto& pOwner = m_pThis->m_pOwner;
{
ScopedMutex_t tLock ( pOwner->m_dMutex );
pOwner->m_OpReadyQueue.Push ( pOwner->m_OpWaitQueue );
bMoreHandlers = pOwner->m_bLocked = !pOwner->m_OpReadyQueue.Empty ();
}
LOG ( ST, ST ) << "OnInvokerFinished_t: " << bMoreHandlers;
if ( !bMoreHandlers )
{
LOG ( ST, ST ) << "OnInvokerFinished_t, abandoned, unlocked";
return;
}
LOG ( ST, ST ) << "OnInvokerFinished_t, have more, locked";
Strand_c::Invoker_c tInvoker { *m_pThis };
// pOwner->Schedule ( [t=std::move(tInvoker)] () mutable { t.run (); }, true );
pOwner->PostContinuationToBackend ( Threads::details::Handler2Op ( [t = std::move ( tInvoker )]() mutable { t.run(); } ) );
}
};
StrandCallStack_c::Context_c dCtx ( m_pOwner );
// that will ensure the next handler, if any, will be scheduled on block exit
OnInvokerFinished_t VARIABLE_IS_NOT_USED dOnFinished = { this };
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
while ( !m_pOwner->m_OpReadyQueue.Empty () )
{
auto * pOp = m_pOwner->m_OpReadyQueue.Front ();
m_pOwner->m_OpReadyQueue.Pop ();
LOG ( ST, ST ) << "run op: " << pOp;
boost::context::detail::prefetch_range ( pOp, sizeof ( Operation_t ) );
pOp->Complete ( pOp );
}
}
class ThreadPool_c final : public Worker_i
{
using Work = Service_t::Work_c;
const char * m_szName = nullptr;
Service_t m_tService;
std::optional<Work> m_dWork;
CSphMutex m_dMutex;
std::atomic<bool> m_bStop {false};
struct alignas ( 64 ) Thd_t // align to a cache line, so m_bBusy can be read freely without false sharing
{
std::atomic<bool> m_bBusy { false };
SphThread_t m_tThread;
LowThreadDesc_t* m_pChild = nullptr;
};
// support iteration over children for show threads and hazards
mutable RwLock_t m_dChildGuard;
CSphFixedVector<Thd_t> m_dThreads { 0 };
void Post ( Threads::details::SchedulerOperation_t* pOp, bool bVip = false ) // post to primary (vip) or secondary queue
{
LOG ( DETAIL, TP ) << "Post " << bVip;
if ( bVip )
m_tService.defer_op ( pOp );
else
m_tService.post_op ( pOp );
LOG ( DETAIL, TP ) << "Post finished";
}
void PostContinuation ( Threads::details::SchedulerOperation_t* pOp ) // 'very vip' - try to execute immediately, or post to the primary queue
{
LOG ( DETAIL, TP ) << "PostContinuation";
m_tService.post_continuation ( pOp );
LOG ( DETAIL, TP ) << "Post finished";
}
// Service_i & Service ()
// {
// return m_tService;
// }
void createWork ()
{
m_dWork.emplace ( m_tService );
}
void loop (int iChild) NO_THREAD_SAFETY_ANALYSIS
{
{
ScWL_t _ ( m_dChildGuard );
m_dThreads[iChild].m_pChild = &MyThd ();
}
while (true)
{
m_tService.run ( m_dThreads[iChild].m_bBusy );
ScopedMutex_t dLock {m_dMutex};
if ( m_bStop )
break;
if ( !m_dWork )
{
createWork ();
m_tService.reset ();
}
}
ScWL_t _ ( m_dChildGuard );
m_dThreads[iChild].m_pChild = nullptr;
}
public:
ThreadPool_c ( size_t iThreadCount, const char * szName )
: m_szName {szName}
, m_tService ( iThreadCount==1 )
{
createWork ();
m_dThreads.Reset ( (int) iThreadCount );
ARRAY_FOREACH ( i, m_dThreads )
Threads::CreateQ ( &m_dThreads[i].m_tThread, [this,i] { loop (i); }, false, m_szName, i );
LOG ( DEBUG, TP ) << "thread pool created with threads: " << iThreadCount;
LOGINFO ( TPLIFE, TP ) << "thread pool created with threads: " << iThreadCount;
}
~ThreadPool_c () final
{
LOGINFO ( TPLIFE, TP ) << "thread pool destroying";
StopAll();
ScWL_t _ ( m_dChildGuard ); // that will keep the children list alive if somebody still iterates over it
}
void DiscardOnFork () final
{
ScWL_t _ ( m_dChildGuard );
m_dThreads.Reset ( 0 );
}
void ScheduleOp ( Threads::details::SchedulerOperation_t* pOp, bool bVip ) final
{
Post ( pOp, bVip );
}
void ScheduleContinuationOp ( Threads::details::SchedulerOperation_t* pOp ) final
{
PostContinuation ( pOp );
}
#define LOG_LEVEL_SERVICE_KEEP_MT false
#if LOG_LEVEL_SERVICE_KEEP_MT
static intptr_t KeepWorkingID()
{
static std::atomic<intptr_t> uWorker { 0ULL };
return uWorker.fetch_add ( 1, std::memory_order_relaxed );
}
Keeper_t KeepWorking() final
{
m_tService.work_started();
auto kwid = KeepWorkingID();
LOGINFO ( SERVICE_KEEP_MT, MT ) << "KeepWorking " << kwid;
return { (void*)kwid, [this] ( void* kwid ) {
m_tService.work_finished (); // split across lines for breakpoints
LOGINFO ( SERVICE_KEEP_MT, MT ) << "KeepWorking finished " << (intptr_t)kwid; } };
}
#else
Keeper_t KeepWorking() final
{
m_tService.work_started();
return { nullptr, [this] ( void* ) { m_tService.work_finished(); } };
}
#endif
int WorkingThreads () const final NO_THREAD_SAFETY_ANALYSIS
{
return m_dThreads.GetLength ();
}
int Works () const final
{
return (int)m_tService.works ();
}
NTasks_t Tasks() const noexcept final
{
return m_tService.tasks();
}
int CurTasks() const noexcept final NO_THREAD_SAFETY_ANALYSIS
{
return (int)m_dThreads.count_of ( [] ( auto& i ) { return i.m_bBusy.load ( std::memory_order_relaxed ); } );
}
void IterateChildren ( ThreadFN& fnHandler ) noexcept final
{
ScRL_t _ ( m_dChildGuard );
for ( const auto& tThd : m_dThreads )
fnHandler ( tThd.m_pChild );
}
void StopAll () final NO_THREAD_SAFETY_ANALYSIS
{
ScopedMutex_t dLock { m_dMutex };
m_bStop = true;
m_dWork.reset ();
if ( sphIsDied() )
m_tService.stop();
dLock.Unlock ();
LOG ( DEBUG, TP ) << "stopping thread pool";
LOGINFO ( TPLIFE, TP ) << "stopping thread pool";
for ( auto & dThread : m_dThreads )
Threads::Join ( &dThread.m_tThread );
LOG ( DEBUG, TP ) << "thread pool stopped";
LOGINFO ( TPLIFE, TP ) << "thread pool stopped";
m_dThreads.Reset ( 0 );
}
};
class AloneThread_c final : public Worker_i
{
CSphString m_sName;
int m_iThreadNum;
Service_t m_tService;
std::atomic<bool> m_bStarted {false};
std::atomic<bool> m_bBusy {false};
static int m_iRunningAlones;
void Post ( Service_t::operation* pOp, bool bVip=false ) // post to primary (vip) or secondary queue
{
LOG ( DETAIL, TP ) << "Post " << bVip;
if ( bVip )
m_tService.defer_op ( pOp );
else
m_tService.post_op ( pOp );
LOG ( DETAIL, TP ) << "Post finished";
if ( !m_bStarted )
{
m_bStarted = true;
SphThread_t tThd; // dummy, since we're starting detached
Threads::CreateQ ( &tThd, [this] { loop (); }, true, m_sName.cstr (), m_iThreadNum );
LOG ( DEBUG, TP ) << "alone thread created";
}
}
void loop ()
{
Detached::AddThread ( &MyThd () );
m_tService.run ( m_bBusy );
Detached::RemoveThread ( &MyThd () );
delete this;
}
public:
explicit AloneThread_c ( int iNum, const char * szName )
: m_sName {szName}
, m_iThreadNum ( iNum )
, m_tService ( true ) // true means 'single-thread'
{
++m_iRunningAlones;
LOG ( DEBUG, TP ) << "alone worker created " << szName;
}
~AloneThread_c () final
{
LOG ( DEBUG, TP ) << "stopping thread";
--m_iRunningAlones;
LOG ( DEBUG, TP ) << "thread stopped";
LOGINFO ( TPLIFE, TP ) << "AloneThread_c destroyed";
}
void ScheduleOp ( Service_t::operation* pOp , bool bVip ) final
{
Post ( pOp, bVip );
}
#define LOG_LEVEL_SERVICE_KEEP_ALONE false
#if LOG_LEVEL_SERVICE_KEEP_ALONE
static intptr_t KeepWorkingID()
{
static std::atomic<intptr_t> uWorker { 0ULL };
return uWorker.fetch_add ( 1, std::memory_order_relaxed );
}
Keeper_t KeepWorking() final
{
m_tService.work_started();
auto kwid = KeepWorkingID();
LOGINFO ( SERVICE_KEEP_ALONE, MT ) << "KeepWorking alone " << this << " " << kwid;
return { (void*)kwid, [this] ( void* kwid ) {
m_tService.work_finished (); // split across lines for breakpoints
LOGINFO ( SERVICE_KEEP_ALONE, MT ) << "KeepWorking alone finished " << this << " " << (intptr_t)kwid; } };
}
#else
Keeper_t KeepWorking() final
{
m_tService.work_started();
return { nullptr, [this] ( void* ) { m_tService.work_finished(); } };
}
#endif
void StopAll () final {}
static int GetRunners () { return m_iRunningAlones; }
int Works () const final
{
return GetRunners ();
}
NTasks_t Tasks() const noexcept final
{
return m_tService.tasks();
}
int CurTasks() const noexcept final
{
return !!m_bBusy.load(std::memory_order_relaxed);
}
const char* Name() const override
{
return m_sName.cstr();
}
};
int AloneThread_c::m_iRunningAlones = 0;
class SchedulerWrapper_c final : public Scheduler_i
{
Scheduler_i* m_pScheduler; // not owned
const char* m_szName;
public:
SchedulerWrapper_c ( Scheduler_i* pScheduler, const char* szName ) noexcept
: m_pScheduler { pScheduler }
, m_szName { szName }
{}
void ScheduleOp ( details::SchedulerOperation_t* pOp, bool bVip ) final
{
m_pScheduler->ScheduleOp ( pOp, bVip );
}
void ScheduleContinuationOp ( details::SchedulerOperation_t* pOp ) final
{
m_pScheduler->ScheduleContinuationOp ( pOp );
}
Keeper_t KeepWorking() final
{
return m_pScheduler->KeepWorking();
};
int WorkingThreads() const final
{
return m_pScheduler->WorkingThreads();
};
const char* Name() const final
{
return m_szName ? m_szName : m_pScheduler->Name();
}
};
WorkerSharedPtr_t MakeThreadPool ( size_t iThreadCount, const char* szName )
{
return WorkerSharedPtr_t { new ThreadPool_c ( iThreadCount, szName ) };
}
WorkerSharedPtr_t MakeAloneThread ( size_t iOrderNum, const char* szName )
{
return WorkerSharedPtr_t { new AloneThread_c ( (int)iOrderNum, szName ) };
}
// Alone scheduler works on top of another scheduler and provides sequential execution of the tasks (only one
// task may be performed at a time, no concurrent execution). It also gives FIFO ordering of the tasks.
SchedulerSharedPtr_t MakeAloneScheduler ( Scheduler_i* pBase, const char* szName )
{
return SchedulerSharedPtr_t { new Strand_c ( pBase, szName ) };
}
// wraps raw scheduler into shared-ptr (it will NOT delete the scheduler when destroyed!)
SchedulerSharedPtr_t WrapRawScheduler ( Scheduler_i* pBase, const char* szName )
{
return SchedulerSharedPtr_t { new SchedulerWrapper_c ( pBase, szName ) };
}
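// A hedged usage sketch of the factories above (illustrative only):
//
//   auto pPool = MakeThreadPool ( 4, "pool" );          // 4 OS threads, shared queues
//   auto pSeq = MakeAloneScheduler ( pPool, "seq" );    // FIFO strand on top of the pool
//   auto pWrap = WrapRawScheduler ( pPool, "wrapped" ); // non-owning shared-ptr view
//
// Since WrapRawScheduler takes no ownership, the wrapped scheduler must outlive the wrapper.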
} // namespace Threads
namespace {
RwLock_t & g_lShutdownGuard ()
{
static RwLock_t lShutdownGuard;
return lShutdownGuard;
}
OpSchedule_t & g_dShutdownList ()
{
static OpSchedule_t dShutdownList GUARDED_BY ( g_lShutdownGuard () );
return dShutdownList;
}
OpSchedule_t & g_dOnForkList ()
{
static OpSchedule_t dOnForkList GUARDED_BY ( g_lShutdownGuard () );
return dOnForkList;
}
}
void searchd::AddShutdownCb ( Handler fnCb )
{
auto pCb = Threads::details::Handler2Op ( std::move ( fnCb ) );
ScWL_t tGuard ( g_lShutdownGuard() );
g_dShutdownList().Push_front( pCb );
}
void searchd::AddOnForkCleanupCb ( Threads::Handler fnCb )
{
auto pCb = Threads::details::Handler2Op ( std::move ( fnCb ) );
ScWL_t tGuard ( g_lShutdownGuard () );
g_dOnForkList ().Push_front ( pCb );
}
// invoke shutdown handlers
void searchd::FireShutdownCbs ()
{
ScRL_t tGuard ( g_lShutdownGuard() );
while ( !g_dShutdownList().Empty () )
{
auto * pOp = g_dShutdownList().Front ();
g_dShutdownList().Pop ();
pOp->Complete ( pOp );
}
}
void searchd::CleanAfterFork () NO_THREAD_SAFETY_ANALYSIS
{
while ( !g_dOnForkList ().Empty () )
{
auto * pOp = g_dOnForkList ().Front ();
g_dOnForkList ().Pop ();
pOp->Complete ( pOp );
}
while ( !g_dShutdownList().Empty () )
{
auto * pOp = g_dShutdownList().Front ();
g_dShutdownList().Pop ();
pOp->Destroy();
}
}
static int g_iMaxChildrenThreads = 1;
namespace {
static WorkerSharedPtr_t pGlobalPool;
WorkerSharedPtr_t& GlobalPoolSingletone ()
{
return pGlobalPool;
}
}
void StartGlobalWorkPool ()
{
sphLogDebug ( "StartGlobalWorkpool" );
WorkerSharedPtr_t& pPool = GlobalPoolSingletone ();
#if !_WIN32
if ( !pPool )
#endif
pPool = new ThreadPool_c ( g_iMaxChildrenThreads, "work" );
}
void StopGlobalWorkPool()
{
sphLogDebug ( "StopGlobalWorkPool" );
WorkerSharedPtr_t& pPool = GlobalPoolSingletone();
if ( pPool )
pPool->StopAll();
}
void SetMaxChildrenThreads ( int iThreads )
{
sphLogDebug ( "SetMaxChildrenThreads to %d", iThreads );
g_iMaxChildrenThreads = Max ( 1, iThreads );
}
Threads::Worker_i * GlobalWorkPool ()
{
WorkerSharedPtr_t& pPool = GlobalPoolSingletone ();
assert ( pPool && "invoke StartGlobalWorkPool first");
return pPool;
}
void WipeGlobalSchedulerOnShutdownAndFork ()
{
#ifndef NDEBUG
static bool bAlreadyInvoked = false;
assert (!bAlreadyInvoked);
bAlreadyInvoked = true;
#endif
Threads::RegisterIterator ( [] ( ThreadFN & fnHandler ) {
WorkerSharedPtr_t& pPool = GlobalPoolSingletone ();
if ( pPool )
pPool->IterateChildren ( fnHandler );
} );
searchd::AddOnForkCleanupCb ( [] {
WorkerSharedPtr_t& pPool = GlobalPoolSingletone ();
if ( pPool )
pPool->DiscardOnFork ();
} );
// searchd::AddShutdownCb ( [] {
// sphWarning ( "stop all pool threads" );
// WorkerSharedPtr_t& pPool = GlobalPoolSingletone ();
// if ( pPool )
// pPool->StopAll ();
// } );
}
void WipeSchedulerOnFork ( Threads::Worker_i* pWorker )
{
Threads::RegisterIterator ( [pWorker] ( ThreadFN& fnHandler ) {
if ( pWorker )
pWorker->IterateChildren ( fnHandler );
} );
searchd::AddOnForkCleanupCb ( [pWorker] {
if ( pWorker )
pWorker->DiscardOnFork();
} );
}
namespace {
static std::atomic<int> g_iRunningThreads {0};
}
int Threads::GetNumOfRunning()
{
return g_iRunningThreads.load ( std::memory_order_relaxed );
}
//////////////////////////////////////////////////////////////////////////
/// helpers to iterate over all registered threads
class OperationsQueue_c::Impl_c
{
CSphMutex m_tQueueGuard;
OpSchedule_t m_tQueue GUARDED_BY ( m_tQueueGuard );
public:
void AddOp ( Handler fnCb )
{
auto pCb = Threads::details::Handler2Op ( std::move ( fnCb ) );
ScopedMutex_t tGuard ( m_tQueueGuard );
m_tQueue.Push_front ( pCb );
}
void RunAll ()
{
OpSchedule_t tQueue;
{
ScopedMutex_t tGuard ( m_tQueueGuard );
if ( m_tQueue.Empty() )
return;
tQueue.Push ( m_tQueue );
}
while ( !tQueue.Empty() )
{
auto* pOp = tQueue.Front();
tQueue.Pop();
pOp->Complete ( pOp );
}
}
bool IsEmpty() const NO_THREAD_SAFETY_ANALYSIS
{
return m_tQueue.Empty();
}
~Impl_c()
{
while ( !m_tQueue.Empty () )
{
auto * pOp = m_tQueue.Front ();
m_tQueue.Pop ();
pOp->Destroy ();
}
}
};
OperationsQueue_c::OperationsQueue_c()
: m_pImpl ( new Impl_c )
{}
OperationsQueue_c::~OperationsQueue_c()
{
SafeDelete ( m_pImpl );
}
void OperationsQueue_c::AddOp (Handler fnOp)
{
assert ( m_pImpl );
m_pImpl->AddOp(std::move(fnOp));
}
void OperationsQueue_c::RunAll()
{
assert ( m_pImpl );
m_pImpl->RunAll();
}
bool OperationsQueue_c::IsEmpty() const
{
assert ( m_pImpl );
return m_pImpl->IsEmpty();
}
namespace { // static
class IterationHandler_c : public Threads::details::SchedulerOperation_t
{
ThreadIteratorFN m_Handler;
public:
explicit IterationHandler_c ( ThreadIteratorFN h )
: SchedulerOperation_t ( &IterationHandler_c::DoComplete )
, m_Handler ( std::move ( h ) )
{}
static void DoComplete ( void * pOwner, SchedulerOperation_t * pBase )
{
auto * pHandler = (IterationHandler_c *) pBase;
if ( pOwner )
pHandler->m_Handler ( *(ThreadFN *) pOwner );
else
delete pHandler;
}
};
struct IteratorsQueue_t
{
RwLock_t m_tQueueGuard;
OpSchedule_t m_tQueue GUARDED_BY ( m_tQueueGuard );
void RegisterIterator ( ThreadIteratorFN fnIterator )
{
auto pCb = ( new IterationHandler_c ( std::move ( fnIterator ) ) );
ScWL_t tGuard ( m_tQueueGuard );
m_tQueue.Push_front ( pCb );
}
// iterate over all (pooled and alone) threads.
// for pooled threads we take no locks, since the pool lives 'as a whole', so no lock is needed to access an individual element.
// the iteration func, however, must check whether the param is nullptr.
// note: non-iterable threads can't use hazard pointers (nobody knows they hold anything).
void IterateActive ( ThreadFN fnHandler )
{
ScRL_t tGuard ( m_tQueueGuard );
for ( auto & dOp : m_tQueue )
dOp.Complete ( &fnHandler );
}
~IteratorsQueue_t()
{
ScWL_t tGuard ( m_tQueueGuard );
while ( !m_tQueue.Empty () )
{
auto * pOp = m_tQueue.Front ();
m_tQueue.Pop ();
pOp->Destroy();
}
}
};
IteratorsQueue_t g_dIteratorsList;
}
void Threads::RegisterIterator ( ThreadIteratorFN fnIterator )
{
g_dIteratorsList.RegisterIterator ( std::move ( fnIterator ) );
}
void Threads::IterateActive ( ThreadFN fnHandler )
{
g_dIteratorsList.IterateActive ( std::move ( fnHandler ) );
}
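// A minimal sketch of the iterator contract described above (hypothetical caller):
//
//   Threads::RegisterIterator ( [] ( ThreadFN & fnHandler ) {
//       // hand every known LowThreadDesc_t* (possibly nullptr slots) to fnHandler
//   } );
//   Threads::IterateActive ( [] ( LowThreadDesc_t * pDesc ) {
//       if ( !pDesc ) return; // pooled slots may be empty; the handler must check
//       // inspect pDesc->m_sThreadName, pDesc->m_iThreadID, ...
//   } );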
Threads::Scheduler_i * MakeSingleThreadExecutor ( int iMaxThreads, const char * szName )
{
if ( iMaxThreads>0 && Threads::AloneThread_c::GetRunners ()>=iMaxThreads )
return nullptr;
static int iOrder = 0;
return new Threads::AloneThread_c ( iOrder++, szName? szName: "alone" );
}
#if !_WIN32
void * Threads::Init ( bool bDetached )
#else
void * Threads::Init ( bool )
#endif
{
static bool bInit = false;
#if !_WIN32
static pthread_attr_t tJoinableAttr;
static pthread_attr_t tDetachedAttr;
#endif
if ( !bInit )
{
#if SPH_DEBUG_LEAKS || SPH_ALLOCS_PROFILER
sphMemStatInit();
#endif
#if !_WIN32
if ( pthread_attr_init ( &tJoinableAttr ) )
sphDie ( "FATAL: pthread_attr_init( joinable ) failed" );
if ( pthread_attr_init ( &tDetachedAttr ) )
sphDie ( "FATAL: pthread_attr_init( detached ) failed" );
if ( pthread_attr_setdetachstate ( &tDetachedAttr, PTHREAD_CREATE_DETACHED ) )
sphDie ( "FATAL: pthread_attr_setdetachstate( detached ) failed" );
#endif
bInit = true;
}
#if !_WIN32
if ( pthread_attr_setstacksize ( &tJoinableAttr, STACK_SIZE ) )
sphDie ( "FATAL: pthread_attr_setstacksize( joinable ) failed" );
if ( pthread_attr_setstacksize ( &tDetachedAttr, STACK_SIZE ) )
sphDie ( "FATAL: pthread_attr_setstacksize( detached ) failed" );
return bDetached ? &tDetachedAttr : &tJoinableAttr;
#else
return NULL;
#endif
}
#if SPH_DEBUG_LEAKS || SPH_ALLOCS_PROFILER
void Threads::Done ( int iFD )
{
sphMemStatDump ( iFD );
sphMemStatDone();
}
#else
void Threads::Done ( int )
{
}
#endif
/// get name of a thread
CSphString Threads::GetName ( const SphThread_t * pThread )
{
if ( !pThread || !*pThread )
return "";
#if HAVE_PTHREAD_GETNAME_NP
char sClippedName[16];
pthread_getname_np ( *pThread, sClippedName, 16 );
return sClippedName;
#else
return "";
#endif
}
/// my join thread wrapper
bool Threads::Join ( SphThread_t * pThread )
{
#if _WIN32
DWORD uWait = WaitForSingleObject ( *pThread, INFINITE );
CloseHandle ( *pThread );
*pThread = NULL;
return ( uWait==WAIT_OBJECT_0 || uWait==WAIT_ABANDONED );
#else
return pthread_join ( *pThread, nullptr )==0;
#endif
}
/// my own thread
SphThread_t Threads::Self ()
{
#if _WIN32
return GetCurrentThread();
#else
return pthread_self ();
#endif
}
/// compares two thread ids
bool Threads::Same ( const LowThreadDesc_t * pFirst, const LowThreadDesc_t * pSecond )
{
if ( !pFirst && !pSecond )
return true;
if ( !pFirst || !pSecond )
return false;
#if _WIN32
// can not use m_tThread on Windows, as GetCurrentThread returns -2, and that handle is valid only inside the thread itself
return ( pFirst->m_iThreadID==pSecond->m_iThreadID );
#else
return pthread_equal ( pFirst->m_tThread, pSecond->m_tThread )!=0;
#endif
}
struct RuntimeThreadContext_t : ISphNoncopyable
{
LowThreadDesc_t m_tDesc;
const void * m_pMyThreadStack = nullptr;
Handler m_fnRun;
#if USE_GPROF
pthread_mutex_t m_dlock;
pthread_cond_t m_dwait;
itimerval m_ditimer;
#endif
#if SPH_ALLOCS_PROFILER
void * m_pTLS = nullptr;
#endif
// main thread execution func
void Run ( const void * pStack );
// prepare everything to make *this most robust
void Prepare ( const void * pStack );
// save name stored in desc as OS thread name
void PropagateName ();
};
namespace {
RuntimeThreadContext_t tStubForMain;
thread_local RuntimeThreadContext_t* g_pLocalThread = &tStubForMain;
}
// to be used globally from thread env
RuntimeThreadContext_t& MyThreadContext()
{
return *g_pLocalThread;
}
LowThreadDesc_t& Threads::MyThd () noexcept
{
return g_pLocalThread->m_tDesc;
}
void Threads::SetSysThreadName ()
{
g_pLocalThread->PropagateName ();
}
void Threads::JobStarted ()
{
auto& tDesc = Threads::MyThd ();
tDesc.m_tmLastJobDoneTimeUS = -1;
tDesc.m_tmLastJobStartTimeUS = sphMicroTimer ();
tDesc.m_tmLastJobStartCPUTimeUS = sphThreadCpuTimer ();
}
void Threads::JobFinished ( bool bIsDone )
{
auto & tDesc = Threads::MyThd ();
tDesc.m_tmLastJobDoneTimeUS = sphMicroTimer ();
if ( bIsDone )
++tDesc.m_iTotalJobsDone;
tDesc.m_tmTotalWorkedTimeUS += tDesc.m_tmLastJobDoneTimeUS-tDesc.m_tmLastJobStartTimeUS;
tDesc.m_tmTotalWorkedCPUTimeUS += sphThreadCpuTimer()-tDesc.m_tmLastJobStartCPUTimeUS;
}
const void * Threads::TopOfStack ()
{
return MyThreadContext().m_pMyThreadStack;
}
void Threads::SetTopStack ( const void * pNewStack )
{
MyThreadContext ().m_pMyThreadStack = pNewStack;
}
namespace {
int& MaxCoroStackSize()
{
static int iMaxCoroStackSize = 1024 * 1024;
return iMaxCoroStackSize;
}
}
void Threads::SetMaxCoroStackSize ( int iStackSize )
{
MaxCoroStackSize() = iStackSize;
}
int Threads::GetMaxCoroStackSize()
{
return MaxCoroStackSize();
}
void Threads::PrepareMainThread ( const void * PStack )
{
MyThreadContext ().Prepare ( PStack );
}
void RuntimeThreadContext_t::PropagateName ()
{
// set name of self
#if HAVE_PTHREAD_SETNAME_NP
if ( !m_tDesc.m_sThreadName.IsEmpty() )
{
auto sSafeName = m_tDesc.m_sThreadName.SubString ( 0, 15 );
assert ( sSafeName.cstr ()!=nullptr );
#if HAVE_PTHREAD_SETNAME_NP_1ARG
pthread_setname_np ( sSafeName.cstr() );
#else
pthread_setname_np ( m_tDesc.m_tThread, sSafeName.cstr() );
#endif
}
#endif
}
void RuntimeThreadContext_t::Prepare ( const void * pStack )
{
m_pMyThreadStack = pStack;
m_tDesc.m_iThreadID = GetOsThreadId ();
m_tDesc.m_tmStart = sphMicroTimer();
m_tDesc.m_pTaskInfo.store ( nullptr, std::memory_order_release );
m_tDesc.m_pHazards.store ( nullptr, std::memory_order_release );
m_tDesc.m_tThread = Threads::Self ();
#if USE_GPROF
// Set the profile timer value
setitimer ( ITIMER_PROF, &m_ditimer, NULL );
// Tell the calling thread that we don't need its data anymore
pthread_mutex_lock ( &m_dlock );
pthread_cond_signal ( &m_dwait );
pthread_mutex_unlock ( &m_dlock );
#endif
PropagateName ();
}
void RuntimeThreadContext_t::Run ( const void * pStack )
{
g_pLocalThread = this;
Prepare ( pStack );
#if SPH_ALLOCS_PROFILER
m_pTLS = sphMemStatThdInit();
#endif
g_iRunningThreads.fetch_add ( 1, std::memory_order_acq_rel );
LOG( DEBUG, MT ) << "thread created";
m_fnRun();
LOG( DEBUG, MT ) << "thread ended";
g_iRunningThreads.fetch_sub ( 1, std::memory_order_acq_rel );
#if SPH_ALLOCS_PROFILER
sphMemStatThdCleanup ( m_pTLS );
#endif
}
#if _WIN32
DWORD __stdcall ThreadProcWrapper_fn ( void * pArg )
#else
void * ThreadProcWrapper_fn ( void * pArg )
#endif
{
// This is the first local variable in the new thread, so its address is the top of the stack.
// We need to know the thread stack size for both the expression and query evaluation engines.
// We store expressions as a linked tree of structs, and execution is a series of mutually
// recursive method calls. Before executing, we compute the tree height and multiply it by a constant
// with an experimentally measured value to check whether we have enough stack to execute the current query.
// The check is not ideal and does not work for all compilers and compiler settings.
char cTopOfMyStack;
std::unique_ptr<RuntimeThreadContext_t> pCtx { (RuntimeThreadContext_t *) pArg };
pCtx->Run ( &cTopOfMyStack );
return 0;
}
bool Threads::Create ( SphThread_t * pThread, Handler fnRun, bool bDetached, const char * sName, int iNum )
{
// we cannot put this on the current stack, because the wrapper needs to see
// it all the time, and it will free this heap data by itself
auto pCtx = std::make_unique<RuntimeThreadContext_t>();
pCtx->m_fnRun = std::move ( fnRun );
if ( sName )
{
if ( iNum<0 )
pCtx->m_tDesc.m_sThreadName = sName;
else
pCtx->m_tDesc.m_sThreadName.SetSprintf ( "%s_%d", sName, iNum );
}
// create thread
#if _WIN32
Threads::Init ( bDetached );
*pThread = CreateThread ( NULL, STACK_SIZE, ThreadProcWrapper_fn, pCtx.get(), 0, NULL );
if ( *pThread )
{
pCtx.release();
return true;
}
#else
#if USE_GPROF
getitimer ( ITIMER_PROF, &pCtx->m_ditimer );
pthread_cond_init ( &pCtx->m_dwait, NULL );
pthread_mutex_init ( &pCtx->m_dlock, NULL );
pthread_mutex_lock ( &pCtx->m_dlock );
#endif
void * pAttr = Threads::Init ( bDetached );
errno = pthread_create ( pThread, (pthread_attr_t*) pAttr, ThreadProcWrapper_fn, pCtx.get() );
#if USE_GPROF
if ( !errno )
pthread_cond_wait ( &pCtx->m_dwait, &pCtx->m_dlock );
pthread_mutex_unlock ( &pCtx->m_dlock );
pthread_mutex_destroy ( &pCtx->m_dlock );
pthread_cond_destroy ( &pCtx->m_dwait );
#endif
if ( !errno )
{
pCtx.release();
return true;
}
#endif // _WIN32
// thread creation failed so we need to cleanup ourselves
return false;
}
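// A hedged usage sketch of Create/Join (illustrative; error handling elided):
//
//   SphThread_t tThd;
//   if ( Threads::Create ( &tThd, []{ /* thread body */ }, false, "demo", 0 ) )
//       Threads::Join ( &tThd ); // joinable thread; its OS name becomes "demo_0"
//
// Passing bDetached=true starts the thread detached, so it must not be joined.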
// Thread with crash query
namespace { // static func
thread_local CrashQuery_t* pTlsCrashQuery = nullptr;
CrashQuery_t** g_ppTlsCrashQuery ()
{
return &pTlsCrashQuery;
}
void GlobalSetTopQueryTLS ( CrashQuery_t * pQuery )
{
*g_ppTlsCrashQuery() = pQuery;
}
void GlobalCrashQuerySet ( const CrashQuery_t & tQuery )
{
CrashQuery_t * pQuery = *g_ppTlsCrashQuery();
assert ( pQuery );
*pQuery = tQuery;
}
}
static CrashQuery_t g_tUnhandled;
CrashQuery_t & GlobalCrashQueryGetRef ()
{
CrashQuery_t * pQuery = *g_ppTlsCrashQuery ();
// in case TLS is not set / not found, the handler still should process the crash
if ( pQuery )
return *pQuery;
sphWarning ("GlobalCrashQueryGetRef: thread-local info is not set! Use ad-hoc");
return g_tUnhandled;
}
CrashQueryKeeper_c::CrashQueryKeeper_c ()
: m_tReference ( GlobalCrashQueryGetRef() )
{}
CrashQueryKeeper_c::~CrashQueryKeeper_c ()
{
RestoreCrashQuery();
}
void CrashQueryKeeper_c::RestoreCrashQuery () const
{
GlobalCrashQuerySet ( m_tReference );
}
namespace
{
constexpr char dWeekdays[7][4] = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
constexpr char dMonths[12][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };
}
/// format current timestamp (for logging, or whatever)
int sphFormatCurrentTime ( char* sTimeBuf, int iBufLen )
{
int64_t iNow = sphMicroTimer();
time_t ts = (time_t)( iNow / 1000000 ); // on some systems (eg. FreeBSD 6.2), tv.tv_sec has another type and we can't just pass it
cctz::civil_second tCS = ConvertTimeLocal(ts);
return snprintf ( sTimeBuf, iBufLen, "%.3s %.3s%3d %.2d:%.2d:%.2d.%.3d %d", dWeekdays[GetWeekDay ( tCS, true )-1], dMonths[tCS.month()-1], tCS.day(), tCS.hour(), tCS.minute(), tCS.second(), (int)( ( iNow % 1000000 ) / 1000 ), (int)tCS.year() );
}
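// For example (values illustrative): at local time Fri Sep 20 2024, 21:26:25.123
// the buffer reads "Fri Sep 20 21:26:25.123 2024"; the %3d day field is space-padded,
// so the 5th would render as "Sep  5".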
void sphFormatCurrentTime ( StringBuilder_c& sOut )
{
int64_t iNow = sphMicroTimer();
time_t ts = (time_t)( iNow / 1000000 ); // on some systems (eg. FreeBSD 6.2), tv.tv_sec has another type, and we can't just pass it
cctz::civil_second tCS = ConvertTimeLocal(ts);
sOut << dWeekdays[GetWeekDay ( tCS, true )-1]
<< ' ' << dMonths[tCS.month()-1]
<< ' ' << Digits<2>(tCS.day())
<< ' ' << Digits<2>(tCS.hour()) << ':' << Digits<2>(tCS.minute()) << ':' << Digits<2>(tCS.second()) << '.' << FixedNum<10,3,0,'0'>( ( iNow % 1000000 ) / 1000 )
<< ' ' << tCS.year();
}
CSphString sphCurrentUtcTime()
{
int64_t iNow = sphMicroTimer();
time_t ts = (time_t)( iNow / 1000000 ); // on some systems (eg. FreeBSD 6.2), tv.tv_sec has another type and we can't just pass it
cctz::civil_second tCS = ConvertTimeUTC(ts);
StringBuilder_c tOut;
tOut << tCS.year()
<< '-' << Digits<2>(tCS.month())
<< '-' << Digits<2>(tCS.day())
<< 'T' << Digits<2>(tCS.hour()) << ':' << Digits<2>(tCS.minute()) << ':' << Digits<2>(tCS.second())
<< '.' << FixedNum<10, 3, 0, '0'> ( ( iNow % 1000000 ) / 1000 );
// tOut.Sprintf ( "%.4d-%.2d-%.2dT%.2d:%.2d:%.2d.%.3d", // YYYY-MM-DDThh:mm:ss[.SSS]
// 1900 + tmp.tm_year,
// tmp.tm_mon + 1,
// tmp.tm_mday,
// tmp.tm_hour,
// tmp.tm_min,
// tmp.tm_sec,
// (int)( ( iNow % 1000000 ) / 1000 ) );
CSphString sRes;
tOut.MoveTo ( sRes );
return sRes;
}
// create a thread for a query - it will have CrashQuery set to a valid object inside, alive during the whole thread's lifetime.
bool Threads::CreateQ ( SphThread_t * pThread, Handler fnRun, bool bDetached, const char * sName, int iNum )
{
return Create ( pThread, [fnCrashRun = std::move ( fnRun )]
{
CrashQuery_t tQueryTLS;
GlobalSetTopQueryTLS ( &tQueryTLS );
LOG( DEBUG, MT ) << "thread created";
fnCrashRun();
LOG( DEBUG, MT ) << "thread ended";
}, bDetached, sName, iNum );
}
// capture crash query and set it before running fnHandler.
Threads::Handler Threads::WithCopiedCrashQuery ( Threads::Handler fnHandler )
{
CrashQuery_t tParentCrashQuery = GlobalCrashQueryGetRef ();
return [tCrashQuery = tParentCrashQuery, fnHandler = std::move ( fnHandler )] {
// CrashQueryKeeper_c _; // restore the previous crash query on exit; seems that's not necessary
GlobalCrashQuerySet ( tCrashQuery );
fnHandler ();
};
}
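// A minimal hedged sketch (hypothetical caller): capture the current crash query
// before handing work to another thread, so crash reports still point at the
// originating query:
//
//   auto fnJob = Threads::WithCopiedCrashQuery ( []{ /* runs with the parent's CrashQuery_t */ } );
//   pSomeScheduler->Schedule ( std::move ( fnJob ), false ); // pSomeScheduler is hypothetical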
| 47,152 | C++ | .cpp | 1,548 | 28.042636 | 245 | 0.693973 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,835 | indexformat.cpp | manticoresoftware_manticoresearch/src/indexformat.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "indexformat.h"
#if WITH_RE2
#include <string>
#include <re2/re2.h>
#endif
// let uDocs be DWORD here to prevent int overflow in case of hitless word (highest bit is 1)
int DoclistHintUnpack ( DWORD uDocs, BYTE uHint )
{
if ( uDocs<(DWORD)DOCLIST_HINT_THRESH )
return (int)Min ( 8*(int64_t)uDocs, INT_MAX );
else
return (int)Min ( 4*(int64_t)uDocs+( int64_t(uDocs)*uHint/64 ), INT_MAX );
}
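// A worked example of the formula above (numbers illustrative, assuming uDocs is
// at or above DOCLIST_HINT_THRESH): uDocs=1000, uHint=32 gives
// 4*1000 + 1000*32/64 = 4500 bytes; the hint byte thus scales the estimate
// between 4x docs (uHint=0) and just under 8x docs (uHint=255).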
//////////////////////////////////////////////////////////////////////////
DiskIndexQwordTraits_c::DiskIndexQwordTraits_c ( bool bUseMini, bool bExcluded )
{
m_bExcluded = bExcluded;
if ( bUseMini )
{
m_pDocsBuf = m_dDoclistBuf;
m_pHitsBuf = m_dHitlistBuf;
}
}
void DiskIndexQwordTraits_c::SetDocReader ( DataReaderFactory_c * pReader )
{
if ( !pReader )
return;
m_rdDoclist = pReader->MakeReader ( m_pDocsBuf, MINIBUFFER_LEN );
}
void DiskIndexQwordTraits_c::SetHitReader ( DataReaderFactory_c * pReader )
{
if ( !pReader )
return;
m_rdHitlist = pReader->MakeReader ( m_pHitsBuf, MINIBUFFER_LEN );
}
void DiskIndexQwordTraits_c::ResetDecoderState ()
{
ISphQword::Reset();
m_uHitPosition = 0;
m_uInlinedHit = 0;
m_uHitState = 0;
m_tDoc.m_tRowID = INVALID_ROWID;
m_iHitPos = EMPTY_HIT;
}
//////////////////////////////////////////////////////////////////////////
class CheckpointReader_c
{
public:
const BYTE * ReadEntry ( const BYTE * pBuf, CSphWordlistCheckpoint & tCP ) const
{
tCP.m_uWordID = (SphWordID_t)sphUnalignedRead ( *(SphOffset_t *)pBuf );
pBuf += sizeof(SphOffset_t);
tCP.m_iWordlistOffset = sphUnalignedRead ( *(SphOffset_t *)pBuf );
pBuf += sizeof(SphOffset_t);
return pBuf;
}
int GetStride() const { return m_iSrcStride; }
private:
int m_iSrcStride = 2*sizeof(SphOffset_t);
};
struct MappedCheckpoint_fn : public ISphNoncopyable
{
const CSphWordlistCheckpoint * m_pDstStart;
const BYTE * m_pSrcStart;
const CheckpointReader_c * m_pReader;
MappedCheckpoint_fn ( const CSphWordlistCheckpoint * pDstStart, const BYTE * pSrcStart, const CheckpointReader_c * pReader )
: m_pDstStart ( pDstStart )
, m_pSrcStart ( pSrcStart )
, m_pReader ( pReader )
{}
CSphWordlistCheckpoint operator() ( const CSphWordlistCheckpoint * pCP ) const
{
assert ( m_pDstStart<=pCP );
const BYTE * pCur = ( pCP - m_pDstStart ) * m_pReader->GetStride() + m_pSrcStart;
CSphWordlistCheckpoint tEntry;
m_pReader->ReadEntry ( pCur, tEntry );
return tEntry;
}
};
//////////////////////////////////////////////////////////////////////////
struct DiskExpandedEntry_t
{
int m_iNameOff;
int m_iDocs;
int m_iHits;
};
struct DiskExpandedPayload_t
{
int m_iDocs;
int m_iHits;
uint64_t m_uDoclistOff;
int m_iDoclistHint;
};
struct Slice64_t
{
uint64_t m_uOff;
int m_iLen;
};
struct DiskSubstringPayload_t : public ISphSubstringPayload
{
explicit DiskSubstringPayload_t ( int iDoclists )
: m_dDoclist ( iDoclists )
{}
CSphFixedVector<Slice64_t> m_dDoclist;
};
//////////////////////////////////////////////////////////////////////////
struct DictEntryDiskPayload_t : public DictTerm2Expanded_i
{
DictEntryDiskPayload_t ( bool bPayload, ESphHitless eHitless )
{
m_bPayload = bPayload;
m_eHitless = eHitless;
if ( bPayload )
m_dWordPayload.Reserve ( 1000 );
m_dWordExpand.Reserve ( 1000 );
m_dWordBuf.Reserve ( 8096 );
}
void Add ( const DictEntry_t & tWord, int iWordLen )
{
if ( !m_bPayload || !sphIsExpandedPayload ( tWord.m_iDocs, tWord.m_iHits ) ||
m_eHitless==SPH_HITLESS_ALL || ( m_eHitless==SPH_HITLESS_SOME && ( tWord.m_iDocs & HITLESS_DOC_FLAG )!=0 ) ) // FIXME!!! do we need hitless=some as payloads?
{
DiskExpandedEntry_t & tExpand = m_dWordExpand.Add();
int iOff = m_dWordBuf.GetLength();
tExpand.m_iNameOff = iOff;
tExpand.m_iDocs = tWord.m_iDocs;
tExpand.m_iHits = tWord.m_iHits;
m_dWordBuf.Resize ( iOff + iWordLen + 1 );
memcpy ( m_dWordBuf.Begin() + iOff + 1, tWord.m_szKeyword, iWordLen );
m_dWordBuf[iOff] = (BYTE)iWordLen;
} else
{
DiskExpandedPayload_t & tExpand = m_dWordPayload.Add();
tExpand.m_iDocs = tWord.m_iDocs;
tExpand.m_iHits = tWord.m_iHits;
tExpand.m_uDoclistOff = tWord.m_iDoclistOffset;
tExpand.m_iDoclistHint = tWord.m_iDoclistHint;
}
}
void Convert ( ISphWordlist::Args_t & tArgs ) override
{
if ( !m_dWordExpand.GetLength() && !m_dWordPayload.GetLength() )
return;
int iTotalDocs = 0;
int iTotalHits = 0;
if ( m_dWordExpand.GetLength() )
{
LimitExpanded ( tArgs.m_iExpansionLimit, m_dWordExpand );
const BYTE * sBase = m_dWordBuf.Begin();
ARRAY_FOREACH ( i, m_dWordExpand )
{
const DiskExpandedEntry_t & tCur = m_dWordExpand[i];
int iDocs = tCur.m_iDocs;
if ( m_eHitless==SPH_HITLESS_SOME )
iDocs = ( tCur.m_iDocs & HITLESS_DOC_MASK );
tArgs.AddExpanded ( sBase + tCur.m_iNameOff + 1, sBase[tCur.m_iNameOff], iDocs, tCur.m_iHits );
iTotalDocs += iDocs;
iTotalHits += tCur.m_iHits;
}
tArgs.m_tExpansionStats.m_iTerms += m_dWordExpand.GetLength();
}
if ( m_dWordPayload.GetLength() )
{
LimitExpanded ( tArgs.m_iExpansionLimit, m_dWordPayload );
std::unique_ptr<DiskSubstringPayload_t> pPayload ( new DiskSubstringPayload_t ( m_dWordPayload.GetLength() ) );
// sorting by ascending doc-list offset gives some (15%) speed-up too
sphSort ( m_dWordPayload.Begin(), m_dWordPayload.GetLength(), bind ( &DiskExpandedPayload_t::m_uDoclistOff ) );
ARRAY_FOREACH ( i, m_dWordPayload )
{
const DiskExpandedPayload_t & tCur = m_dWordPayload[i];
assert ( m_eHitless==SPH_HITLESS_NONE || ( m_eHitless==SPH_HITLESS_SOME && ( tCur.m_iDocs & HITLESS_DOC_FLAG )==0 ) );
iTotalDocs += tCur.m_iDocs;
iTotalHits += tCur.m_iHits;
pPayload->m_dDoclist[i].m_uOff = tCur.m_uDoclistOff;
pPayload->m_dDoclist[i].m_iLen = tCur.m_iDoclistHint;
}
pPayload->m_iTotalDocs = iTotalDocs;
pPayload->m_iTotalHits = iTotalHits;
tArgs.m_pPayload = std::move ( pPayload );
tArgs.m_tExpansionStats.m_iMerged += m_dWordPayload.GetLength();
}
tArgs.m_iTotalDocs = iTotalDocs;
tArgs.m_iTotalHits = iTotalHits;
}
// sort expansions by frequency desc
// clip the less frequent ones if needed, as they are likely misspellings
template < typename T >
void LimitExpanded ( int iExpansionLimit, CSphVector<T> & dVec ) const
{
if ( !iExpansionLimit || dVec.GetLength()<=iExpansionLimit )
return;
sphSort ( dVec.Begin(), dVec.GetLength(), ExpandedOrderDesc_T<T>() );
dVec.Resize ( iExpansionLimit );
}
bool m_bPayload;
ESphHitless m_eHitless;
CSphVector<DiskExpandedEntry_t> m_dWordExpand;
CSphVector<DiskExpandedPayload_t> m_dWordPayload;
CSphVector<BYTE> m_dWordBuf;
};
//////////////////////////////////////////////////////////////////////////
CWordlist::~CWordlist ()
{
Reset();
}
void CWordlist::Reset ()
{
m_tBuf.Reset ();
m_dCheckpoints.Reset ( 0 );
m_pWords.Reset ( 0 );
SafeDeleteArray ( m_pInfixBlocksWords );
SafeDelete ( m_pCpReader );
}
bool CWordlist::Preread ( const CSphString & sName, bool bWordDict, int iSkiplistBlockSize, CSphString & sError )
{
assert ( m_iDictCheckpointsOffset>0 );
m_bWordDict = bWordDict;
m_iWordsEnd = m_iDictCheckpointsOffset; // set wordlist end
m_iSkiplistBlockSize = iSkiplistBlockSize;
////////////////////////////
// preload word checkpoints
////////////////////////////
////////////////////////////
// fast path for CRC checkpoints - just map the data and use the in-place CP reader
if ( !bWordDict )
{
if ( !m_tBuf.Setup ( sName, sError ) )
return false;
m_pCpReader = new CheckpointReader_c;
return true;
}
////////////////////////////
// regular path that loads checkpoints data
CSphAutoreader tReader;
if ( !tReader.Open ( sName, sError ) )
return false;
int64_t iFileSize = tReader.GetFilesize();
int iCheckpointOnlySize = (int)(iFileSize-m_iDictCheckpointsOffset);
if ( m_iInfixCodepointBytes && m_iInfixBlocksOffset )
iCheckpointOnlySize = (int)(m_iInfixBlocksOffset - g_sTagInfixBlocks.second - m_iDictCheckpointsOffset);
if ( iFileSize-m_iDictCheckpointsOffset>=UINT_MAX )
{
sError.SetSprintf ( "dictionary meta overflow: meta size=" INT64_FMT ", total size=" INT64_FMT ", meta offset=" INT64_FMT,
iFileSize-m_iDictCheckpointsOffset, iFileSize, (int64_t)m_iDictCheckpointsOffset );
return false;
}
tReader.SeekTo ( m_iDictCheckpointsOffset, iCheckpointOnlySize );
assert ( m_bWordDict );
int iArenaSize = iCheckpointOnlySize
- (sizeof(DWORD)+sizeof(SphOffset_t))*m_dCheckpoints.GetLength()
+ sizeof(BYTE)*m_dCheckpoints.GetLength();
assert ( iArenaSize>=0 );
m_pWords.Reset ( iArenaSize );
BYTE * pWord = m_pWords.Begin();
for ( auto & dCheckpoint : m_dCheckpoints )
{
dCheckpoint.m_szWord = (char *)pWord;
const int iLen = tReader.GetDword();
assert ( iLen>0 );
assert ( iLen + 1 + ( pWord - m_pWords.Begin() )<=iArenaSize );
tReader.GetBytes ( pWord, iLen );
pWord[iLen] = '\0';
pWord += iLen+1;
dCheckpoint.m_iWordlistOffset = tReader.GetOffset();
}
////////////////////////
// preload infix blocks
////////////////////////
if ( m_iInfixCodepointBytes && m_iInfixBlocksOffset )
{
// reading into a vector, as the old version doesn't store the total infix words length
CSphTightVector<BYTE> dInfixWords;
dInfixWords.Reserve ( (int)m_iInfixBlocksWordsSize );
tReader.SeekTo ( m_iInfixBlocksOffset, (int)(iFileSize-m_iInfixBlocksOffset) );
m_dInfixBlocks.Resize ( tReader.UnzipInt() );
for ( auto & dInfixBlock : m_dInfixBlocks )
{
int iBytes = tReader.UnzipInt();
int iOff = dInfixWords.GetLength();
dInfixBlock.m_iInfixOffset = (DWORD) iOff; /// FIXME! name convention of m_iInfixOffset
dInfixWords.Resize ( iOff+iBytes+1 );
tReader.GetBytes ( dInfixWords.Begin()+iOff, iBytes );
dInfixWords[iOff+iBytes] = '\0';
dInfixBlock.m_iOffset = tReader.UnzipInt();
}
// fix-up offset to pointer
m_pInfixBlocksWords = dInfixWords.LeakData();
ARRAY_FOREACH ( i, m_dInfixBlocks )
m_dInfixBlocks[i].m_sInfix = (const char *)m_pInfixBlocksWords + m_dInfixBlocks[i].m_iInfixOffset;
// FIXME!!! store and load that explicitly
if ( m_dInfixBlocks.GetLength() )
m_iWordsEnd = m_dInfixBlocks.Begin()->m_iOffset - g_sTagInfixEntries.second;
else
m_iWordsEnd -= g_sTagInfixEntries.second;
}
if ( tReader.GetErrorFlag() )
{
sError = tReader.GetErrorMessage();
return false;
}
tReader.Close();
// map only the wordlist, without meta (checkpoints, infixes, etc.)
return m_tBuf.Setup ( sName, sError );
}
void CWordlist::DebugPopulateCheckpoints()
{
if ( !m_pCpReader )
return;
const BYTE * pCur = m_tBuf.GetReadPtr() + m_iDictCheckpointsOffset;
ARRAY_FOREACH ( i, m_dCheckpoints )
pCur = m_pCpReader->ReadEntry ( pCur, m_dCheckpoints[i] );
SafeDelete(m_pCpReader);
}
const CSphWordlistCheckpoint * CWordlist::FindCheckpointCrc ( SphWordID_t iWordID ) const
{
if ( m_pCpReader ) // FIXME!!! fall to regular checkpoints after data got read
{
MappedCheckpoint_fn tPred ( m_dCheckpoints.Begin(), m_tBuf.GetReadPtr() + m_iDictCheckpointsOffset, m_pCpReader );
return sphSearchCheckpointCrc( iWordID, m_dCheckpoints, std::move(tPred));
}
return sphSearchCheckpointCrc ( iWordID, m_dCheckpoints );
}
const CSphWordlistCheckpoint * CWordlist::FindCheckpointWrd ( const char* sWord, int iWordLen, bool bStarMode ) const
{
if ( m_pCpReader ) // FIXME!!! fall to regular checkpoints after data got read
{
MappedCheckpoint_fn tPred ( m_dCheckpoints.Begin(), m_tBuf.GetReadPtr() + m_iDictCheckpointsOffset, m_pCpReader );
return sphSearchCheckpointWrd ( sWord, iWordLen, bStarMode, m_dCheckpoints, std::move ( tPred ) );
}
return sphSearchCheckpointWrd ( sWord, iWordLen, bStarMode, m_dCheckpoints );
}
bool CWordlist::GetWord ( const BYTE * pBuf, SphWordID_t iWordID, DictEntry_t & tWord ) const
{
SphWordID_t iLastID = 0;
SphOffset_t uLastOff = 0;
while (true)
{
// unpack next word ID
const SphWordID_t iDeltaWord = UnzipWordidBE ( pBuf ); // FIXME! slow with 32bit wordids
if ( iDeltaWord==0 ) // wordlist chunk is over
return false;
iLastID += iDeltaWord;
// list is sorted, so if there was no match, there's no such word
if ( iLastID>iWordID )
return false;
// unpack next offset
const SphOffset_t iDeltaOffset = UnzipOffsetBE ( pBuf );
uLastOff += iDeltaOffset;
// unpack doc/hit count
const int iDocs = UnzipIntBE ( pBuf );
const int iHits = UnzipIntBE ( pBuf );
SphOffset_t iSkiplistPos = 0;
if ( iDocs > m_iSkiplistBlockSize )
iSkiplistPos = UnzipOffsetBE ( pBuf );
assert ( iDeltaOffset );
assert ( iDocs );
assert ( iHits );
// it matches?!
if ( iLastID==iWordID )
{
UnzipWordidBE ( pBuf ); // might be 0 at checkpoint
const SphOffset_t iDoclistLen = UnzipOffsetBE ( pBuf );
tWord.m_iDoclistOffset = uLastOff;
tWord.m_iDocs = iDocs;
tWord.m_iHits = iHits;
tWord.m_iDoclistHint = (int)iDoclistLen;
tWord.m_iSkiplistOffset = iSkiplistPos;
return true;
}
}
}
const BYTE * CWordlist::AcquireDict ( const CSphWordlistCheckpoint * pCheckpoint ) const
{
assert ( pCheckpoint );
assert ( m_dCheckpoints.GetLength() );
assert ( pCheckpoint>=m_dCheckpoints.Begin() && pCheckpoint<=&m_dCheckpoints.Last() );
SphOffset_t iOff = pCheckpoint->m_iWordlistOffset;
if ( m_pCpReader )
{
MappedCheckpoint_fn tPred ( m_dCheckpoints.Begin(), m_tBuf.GetReadPtr() + m_iDictCheckpointsOffset, m_pCpReader );
iOff = tPred ( pCheckpoint ).m_iWordlistOffset;
}
assert ( !m_tBuf.IsEmpty() );
assert ( iOff>0 && iOff<(int64_t)m_tBuf.GetLengthBytes() );
return m_tBuf.GetReadPtr()+iOff;
}
void CWordlist::GetPrefixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const
{
assert ( sSubstring && *sSubstring && iSubLen>0 );
// empty index?
if ( !m_dCheckpoints.GetLength() )
return;
DictEntryDiskPayload_t tDict2Payload ( tArgs.m_bPayload, tArgs.m_eHitless );
int dWildcard [ SPH_MAX_WORD_LEN + 1 ];
int * pWildcard = ( sphIsUTF8 ( sWildcard ) && sphUTF8ToWideChar ( sWildcard, dWildcard, SPH_MAX_WORD_LEN ) ) ? dWildcard : NULL;
// assume dict=crc never has a word with wordid=0; we just don't consider it, and explicitly set nullptr.
const CSphWordlistCheckpoint * pCheckpoint = m_bWordDict ? FindCheckpointWrd ( sSubstring, iSubLen, true ) : nullptr;
const int iSkipMagic = ( BYTE(*sSubstring)<0x20 ); // whether to skip heading magic chars in the prefix, like the NONSTEMMED marker
while ( pCheckpoint )
{
// decode wordlist chunk
KeywordsBlockReader_c tDictReader ( AcquireDict ( pCheckpoint ), m_iSkiplistBlockSize );
while ( tDictReader.UnpackWord() )
{
// block is sorted
// so once keywords are greater than the prefix, no more matches
int iCmp = sphDictCmp ( sSubstring, iSubLen, (const char *)tDictReader.m_szKeyword, tDictReader.GetWordLen() );
if ( iCmp<0 )
break;
if ( sphInterrupted() )
break;
// does it match the prefix *and* the entire wildcard?
if ( iCmp==0 && sphWildcardMatch ( (const char *)tDictReader.m_szKeyword + iSkipMagic, sWildcard, pWildcard ) )
tDict2Payload.Add ( tDictReader, tDictReader.GetWordLen() );
}
if ( sphInterrupted () )
break;
pCheckpoint++;
if ( pCheckpoint > &m_dCheckpoints.Last() )
break;
if ( sphDictCmp ( sSubstring, iSubLen, pCheckpoint->m_szWord, (int) strlen ( pCheckpoint->m_szWord ) )<0 )
break;
}
tDict2Payload.Convert ( tArgs );
}
void CWordlist::GetInfixedWords ( const char * sSubstring, int iSubLen, const char * sWildcard, Args_t & tArgs ) const
{
// dict must be of keywords type, and fully cached
// mmap()ed in the worst case, should we ever banish it to disk again
if ( m_tBuf.IsEmpty() || !m_dCheckpoints.GetLength() )
return;
assert ( !m_pCpReader );
// extract key1, up to 6 chars from the infix start
int iBytes1 = sphGetInfixLength ( sSubstring, iSubLen, m_iInfixCodepointBytes );
// lookup key1
// OPTIMIZE? maybe lookup key2 and reduce checkpoint set size, if possible?
CSphVector<DWORD> dPoints;
if ( !sphLookupInfixCheckpoints ( sSubstring, iBytes1, m_tBuf.GetReadPtr(), m_dInfixBlocks, m_iInfixCodepointBytes, dPoints ) )
return;
DictEntryDiskPayload_t tDict2Payload ( tArgs.m_bPayload, tArgs.m_eHitless );
const int iSkipMagic = ( tArgs.m_bHasExactForms ? 1 : 0 ); // whether to skip heading magic chars in the prefix, like the NONSTEMMED marker
int dWildcard [ SPH_MAX_WORD_LEN + 1 ];
int * pWildcard = ( sphIsUTF8 ( sWildcard ) && sphUTF8ToWideChar ( sWildcard, dWildcard, SPH_MAX_WORD_LEN ) ) ? dWildcard : NULL;
// walk those checkpoints, check all their words
ARRAY_FOREACH ( i, dPoints )
{
// OPTIMIZE? add a quicker path than a generic wildcard for "*infix*" case?
KeywordsBlockReader_c tDictReader ( m_tBuf.GetReadPtr() + m_dCheckpoints[dPoints[i]-1].m_iWordlistOffset, m_iSkiplistBlockSize );
while ( tDictReader.UnpackWord() )
{
if ( sphInterrupted () )
break;
// stemmed terms should not match suffixes
if ( tArgs.m_bHasExactForms && *tDictReader.m_szKeyword!=MAGIC_WORD_HEAD_NONSTEMMED )
continue;
if ( sphWildcardMatch ( (const char *)tDictReader.m_szKeyword+iSkipMagic, sWildcard, pWildcard ) )
tDict2Payload.Add ( tDictReader, tDictReader.GetWordLen() );
}
if ( sphInterrupted () )
break;
}
tDict2Payload.Convert ( tArgs );
}
#if WITH_RE2
struct RegexMatch_t
{
std::unique_ptr<RE2> m_pRe { nullptr };
std::unique_ptr<DictEntryDiskPayload_t> m_pPayload { nullptr };
};
#endif
void CWordlist::ScanRegexWords ( const VecTraits_T<RegexTerm_t> & dTerms, const ISphWordlist::Args_t & tArgs, const VecExpandConv_t & dConverters ) const
{
// dict must be of keywords type, and fully cached
// mmap()ed in the worst case, should we ever banish it to disk again
if ( m_tBuf.IsEmpty() || !m_dCheckpoints.GetLength() )
return;
assert ( dTerms.GetLength() && dTerms.GetLength()==dConverters.GetLength() );
#if WITH_RE2
CSphFixedVector<RegexMatch_t> dRegex ( dTerms.GetLength() );
RE2::Options tOptions;
tOptions.set_encoding ( RE2::Options::Encoding::EncodingUTF8 );
ARRAY_FOREACH ( i, dRegex )
{
dRegex[i].m_pRe = std::make_unique<RE2> ( dTerms[i].first.cstr(), tOptions );
dRegex[i].m_pPayload = std::make_unique<DictEntryDiskPayload_t> ( tArgs.m_bPayload, tArgs.m_eHitless );
assert ( dRegex[i].m_pRe && dRegex[i].m_pPayload );
}
const int iSkipMagic = ( tArgs.m_bHasExactForms ? 1 : 0 ); // whether to skip heading magic chars in the prefix, like the NONSTEMMED marker
// walk those checkpoints, check all their words
ARRAY_FOREACH ( i, m_dCheckpoints )
{
const auto & tCP = m_dCheckpoints[i];
KeywordsBlockReader_c tDictReader ( m_tBuf.GetReadPtr() + tCP.m_iWordlistOffset, m_iSkiplistBlockSize );
while ( tDictReader.UnpackWord() )
{
if ( sphInterrupted () )
break;
// stemmed terms should not match suffixes
if ( tArgs.m_bHasExactForms && *tDictReader.m_szKeyword!=MAGIC_WORD_HEAD_NONSTEMMED )
continue;
int iLen = tDictReader.GetWordLen();
re2::StringPiece sDictToken ( (const char *)tDictReader.m_szKeyword+iSkipMagic, iLen );
ARRAY_FOREACH ( i, dRegex )
{
if ( RE2::FullMatchN ( sDictToken, *dRegex[i].m_pRe, nullptr, 0 ) )
dRegex[i].m_pPayload->Add ( tDictReader, iLen );
}
}
if ( sphInterrupted () )
break;
}
ARRAY_FOREACH ( i, dRegex )
dConverters[i] = std::move( dRegex[i].m_pPayload );
#endif
}
void CWordlist::SuffixGetChekpoints ( const SuggestResult_t & , const char * sSuffix, int iLen, CSphVector<DWORD> & dCheckpoints ) const
{
sphLookupInfixCheckpoints ( sSuffix, iLen, m_tBuf.GetReadPtr(), m_dInfixBlocks, m_iInfixCodepointBytes, dCheckpoints );
}
void CWordlist::SetCheckpoint ( SuggestResult_t & tRes, DWORD iCP ) const
{
assert ( tRes.m_pWordReader );
KeywordsBlockReader_c * pReader = (KeywordsBlockReader_c *)tRes.m_pWordReader;
pReader->Reset ( m_tBuf.GetReadPtr() + m_dCheckpoints[iCP-1].m_iWordlistOffset );
}
bool CWordlist::ReadNextWord ( SuggestResult_t & tRes, DictWord_t & tWord ) const
{
KeywordsBlockReader_c * pReader = (KeywordsBlockReader_c *)tRes.m_pWordReader;
if ( !pReader->UnpackWord() )
return false;
tWord.m_sWord = pReader->GetWord();
tWord.m_iLen = pReader->GetWordLen();
tWord.m_iDocs = pReader->m_iDocs;
return true;
}
//////////////////////////////////////////////////////////////////////////
KeywordsBlockReader_c::KeywordsBlockReader_c ( const BYTE * pBuf, int iSkiplistBlockSize )
: m_iSkiplistBlockSize ( iSkiplistBlockSize )
{
Reset ( pBuf );
}
void KeywordsBlockReader_c::Reset ( const BYTE * pBuf )
{
m_pBuf = pBuf;
m_sWord[0] = '\0';
m_iLen = 0;
m_szKeyword = m_sWord.data();
}
bool KeywordsBlockReader_c::UnpackWord()
{
if ( !m_pBuf )
return false;
assert ( m_iSkiplistBlockSize>0 );
// unpack next word
// must be in sync with DictEnd()!
BYTE uPack = *m_pBuf++;
if ( !uPack )
{
// ok, this block is over
m_pBuf = NULL;
m_iLen = 0;
return false;
}
int iMatch, iDelta;
if ( uPack & 0x80 )
{
iDelta = ( ( uPack>>4 ) & 7 ) + 1;
iMatch = uPack & 15;
} else
{
iDelta = uPack & 127;
iMatch = *m_pBuf++;
}
assert ( iMatch+iDelta<(int)sizeof(m_sWord)-1 );
assert ( iMatch<=(int)strlen ( (char *)m_sWord.data() ) );
memcpy ( m_sWord.data() + iMatch, m_pBuf, iDelta );
m_pBuf += iDelta;
m_iLen = iMatch + iDelta;
m_sWord[m_iLen] = '\0';
m_iDoclistOffset = UnzipOffsetBE ( m_pBuf );
m_iDocs = UnzipIntBE ( m_pBuf );
m_iHits = UnzipIntBE ( m_pBuf );
m_uHint = ( m_iDocs>=DOCLIST_HINT_THRESH ) ? *m_pBuf++ : 0;
m_iDoclistHint = DoclistHintUnpack ( m_iDocs, m_uHint );
if ( m_iDocs > m_iSkiplistBlockSize )
m_iSkiplistOffset = UnzipOffsetBE ( m_pBuf );
else
m_iSkiplistOffset = 0;
assert ( m_iLen>0 );
return true;
}
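// A worked example of the prefix packing decoded above (illustrative): if the
// previous word is "search" and the next packed byte is 0x93 (high bit set), then
// iDelta = ((0x93>>4)&7)+1 = 2 and iMatch = 0x93&15 = 3, so the first 3 bytes
// ("sea") are kept and 2 fresh bytes are appended from the stream, e.g. "ls"
// yielding "seals".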
static int g_iExpandMergeDocs = 32;
static int g_iExpandMergeHits = 256;
bool sphIsExpandedPayload ( int iDocs, int iHits )
{
return ( iHits<g_iExpandMergeHits || iDocs<g_iExpandMergeDocs );
}
void ExpandedMergeThdDocs ( int iDocs )
{
g_iExpandMergeDocs = iDocs;
}
void ExpandedMergeThdHits ( int iHits )
{
g_iExpandMergeHits = iHits;
}
////////////////////////////////////////////////////////////////////
void IndexWriteHeader ( const BuildHeader_t & tBuildHeader, const WriteHeader_t & tWriteHeader, JsonEscapedBuilder& sJson, bool bForceWordDict, bool bSkipEmbeddedDict )
{
auto _ = sJson.ObjectW();
// human-readable sugar
sJson.NamedString ( "meta_created_time_utc", sphCurrentUtcTime() );
// version
sJson.NamedVal ( "index_format_version", INDEX_FORMAT_VERSION );
// index stats - json (put here to be similar with .meta)
sJson.NamedValNonDefault ( "total_documents", tBuildHeader.m_iTotalDocuments );
sJson.NamedValNonDefault ( "total_bytes", tBuildHeader.m_iTotalBytes );
// schema
sJson.NamedVal ( "schema", *tWriteHeader.m_pSchema );
// index settings
sJson.NamedVal ( "index_settings", *tWriteHeader.m_pSettings );
// tokenizer info
assert ( tWriteHeader.m_pTokenizer );
sJson.Named ( "tokenizer_settings" );
SaveTokenizerSettings ( sJson, tWriteHeader.m_pTokenizer, tWriteHeader.m_pSettings->m_iEmbeddedLimit );
// dictionary info
assert ( tWriteHeader.m_pDict );
sJson.Named ( "dictionary_settings" );
SaveDictionarySettings ( sJson, tWriteHeader.m_pDict, bForceWordDict, bSkipEmbeddedDict ? 0 : tWriteHeader.m_pSettings->m_iEmbeddedLimit );
// wordlist checkpoints - json
sJson.NamedValNonDefault ( "dict_checkpoints_offset", tBuildHeader.m_iDictCheckpointsOffset );
sJson.NamedValNonDefault ( "dict_checkpoints", tBuildHeader.m_iDictCheckpoints );
sJson.NamedValNonDefault ( "infix_codepoint_bytes", tBuildHeader.m_iInfixCodepointBytes );
sJson.NamedValNonDefault ( "infix_blocks_offset", tBuildHeader.m_iInfixBlocksOffset );
sJson.NamedValNonDefault ( "infix_block_words_size", tBuildHeader.m_iInfixBlocksWordsSize );
sJson.NamedValNonDefault ( "docinfo", tBuildHeader.m_iDocinfo );
sJson.NamedValNonDefault ( "docinfo_index", tBuildHeader.m_iDocinfoIndex );
sJson.NamedValNonDefault ( "min_max_index", tBuildHeader.m_iMinMaxIndex );
// field filter info
CSphFieldFilterSettings tFieldFilterSettings;
if ( tWriteHeader.m_pFieldFilter )
{
tWriteHeader.m_pFieldFilter->GetSettings ( tFieldFilterSettings );
sJson.NamedVal ( "field_filter_settings", tFieldFilterSettings );
}
// average field lengths
if ( tWriteHeader.m_pSettings->m_bIndexFieldLens )
{
sJson.Named ( "index_fields_lens" );
auto _ = sJson.Array();
for ( int i=0; i < tWriteHeader.m_pSchema->GetFieldsCount(); ++i )
{
sJson << tWriteHeader.m_pFieldLens[i];
}
}
}
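// A rough sketch of the JSON header this emits (values are illustrative;
// the NamedValNonDefault fields are omitted when they hold default values):
//
// {
//   "meta_created_time_utc": "...",
//   "index_format_version": ...,
//   "total_documents": ...,
//   "schema": { ... },
//   "index_settings": { ... },
//   "tokenizer_settings": { ... },
//   "dictionary_settings": { ... },
//   "dict_checkpoints_offset": ...,
//   "field_filter_settings": { ... },   // only when a field filter is set
//   "index_fields_lens": [ ... ]        // only when field lengths are indexed
// }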
| 25,060 | C++ | .cpp | 666 | 34.965465 | 165 | 0.705882 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,836 | knnlib.cpp | manticoresoftware_manticoresearch/src/knnlib.cpp |
//
// Copyright (c) 2020-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "knnlib.h"
#include "columnarlib.h"
#include "sphinxutils.h"
#include "libutils.h"
#include "schema/schema.h"
using Create_fn = knn::KNN_i * (*) ();
using CreateBuilder_fn = knn::Builder_i * (*) ( const knn::Schema_t & tSchema, int64_t iNumElements );
using CreateDistanceCalc_fn = knn::Distance_i * (*) ( const knn::IndexSettings_t & tSettings );
using VersionStr_fn = const char * (*)();
using GetVersion_fn = int (*)();
static void * g_pKNNLib = nullptr;
static Create_fn g_fnCreate = nullptr;
static CreateBuilder_fn g_fnCreateKNNBuilder = nullptr;
static CreateDistanceCalc_fn g_fnCreateDistanceCalc = nullptr;
static VersionStr_fn g_fnVersionStr = nullptr;
/////////////////////////////////////////////////////////////////////
std::unique_ptr<knn::KNN_i> CreateKNN ( CSphString & sError )
{
if ( !IsKNNLibLoaded() )
{
sError = "knn library not loaded";
return nullptr;
}
assert ( g_fnCreate );
return std::unique_ptr<knn::KNN_i> ( g_fnCreate() );
}
std::unique_ptr<knn::Builder_i> CreateKNNBuilder ( const ISphSchema & tSchema, int64_t iNumElements, CSphString & sError )
{
if ( !IsKNNLibLoaded() )
{
sError = "knn library not loaded";
return nullptr;
}
knn::Schema_t tKNNSchema;
// convert our data types to columnar storage data types
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( !tAttr.IsIndexedKNN() )
continue;
common::AttrType_e eAttrType = ToColumnarType ( tAttr.m_eAttrType, tAttr.m_tLocator.m_iBitCount );
knn::AttrWithSettings_t tKNNAttr;
(knn::IndexSettings_t &)tKNNAttr = tAttr.m_tKNN;
tKNNAttr.m_sName = tAttr.m_sName.cstr();
tKNNAttr.m_eType = eAttrType;
tKNNSchema.push_back(tKNNAttr);
}
if ( tKNNSchema.empty() )
return nullptr;
assert ( g_fnCreateKNNBuilder );
std::unique_ptr<knn::Builder_i> pBuilder { g_fnCreateKNNBuilder ( tKNNSchema, iNumElements ) };
if ( !pBuilder )
sError = "error creating knn index builder";
return pBuilder;
}
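// Minimal usage sketch (assumes the knn library was loaded at startup):
//
//   CSphString sError;
//   auto pBuilder = CreateKNNBuilder ( tSchema, iNumRows, sError );
//   if ( !pBuilder && !sError.IsEmpty() )
//       return false; // creation failed; note that a nullptr with an EMPTY
//                     // error just means the schema has no KNN-indexed attrs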
std::unique_ptr<knn::Distance_i> CreateKNNDistanceCalc ( const knn::IndexSettings_t & tSettings )
{
assert ( g_fnCreateDistanceCalc ); // caller must ensure the knn library is loaded first
return std::unique_ptr<knn::Distance_i> ( g_fnCreateDistanceCalc(tSettings) );
}
#if HAVE_DLOPEN
bool InitKNN ( CSphString & sError )
{
assert ( !g_pKNNLib );
CSphString sLibfile = TryDifferentPaths ( LIB_MANTICORE_KNN, GetKNNFullpath(), knn::LIB_VERSION );
if ( sLibfile.IsEmpty() )
return true;
if ( !IsSSE42Supported() )
{
sError.SetSprintf ( "MCL requires a CPU that supports SSE 4.2" );
return false;
}
ScopedHandle_c tHandle ( dlopen ( sLibfile.cstr(), RTLD_LAZY | RTLD_LOCAL ) );
if ( !tHandle.Get() )
{
const char * szDlError = dlerror();
sError.SetSprintf ( "dlopen() failed: %s", szDlError ? szDlError : "(null)" );
return true; // dlopen() failure is not fatal: the knn library is optional; sError just carries the reason
}
sphLogDebug ( "dlopen(%s)=%p", sLibfile.cstr(), tHandle.Get() );
GetVersion_fn fnGetVersion;
if ( !LoadFunc ( fnGetVersion, tHandle.Get(), "GetKNNLibVersion", sLibfile, sError ) )
return false;
int iLibVersion = fnGetVersion();
if ( iLibVersion!=knn::LIB_VERSION )
{
sError.SetSprintf ( "daemon requires knn library v%d (trying to load v%d)", knn::LIB_VERSION, iLibVersion );
return false;
}
if ( !LoadFunc ( g_fnCreate, tHandle.Get(), "CreateKNN", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnCreateKNNBuilder, tHandle.Get(), "CreateKNNBuilder", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnCreateDistanceCalc, tHandle.Get(), "CreateDistanceCalc", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnVersionStr, tHandle.Get(), "GetKNNLibVersionStr", sLibfile, sError ) ) return false;
g_pKNNLib = tHandle.Leak();
return true;
}
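// Note the loading contract above: a missing library file or a failed
// dlopen() is NOT fatal, since KNN support is optional, while a present
// library with a wrong ABI version or missing exports IS a hard error.
// Hypothetical caller sketch:
//
//   CSphString sError;
//   if ( !InitKNN ( sError ) )
//       sphDie ( "%s", sError.cstr() );               // hard failure
//   else if ( !IsKNNLibLoaded() )
//       sphLogDebug ( "KNN support is disabled" );    // soft, optional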
void ShutdownKNN()
{
if ( g_pKNNLib )
dlclose(g_pKNNLib);
}
#else
bool InitKNN ( CSphString & sError ) { return false; }
void ShutdownKNN() {}
#endif
const char * GetKNNVersionStr()
{
if ( !IsKNNLibLoaded() )
return nullptr;
assert ( g_fnVersionStr );
return g_fnVersionStr();
}
bool IsKNNLibLoaded()
{
return !!g_pKNNLib;
}
| 4,445 | C++ | .cpp | 124 | 33.766129 | 122 | 0.703522 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,837 | columnarfilter.cpp | manticoresoftware_manticoresearch/src/columnarfilter.cpp |
//
// Copyright (c) 2020-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "columnarfilter.h"
#include "collation.h"
#include "attribute.h"
#include "sphinxint.h"
#include "conversion.h"
class ColumnarFilter_c : public ISphFilter
{
public:
ColumnarFilter_c ( const CSphString & sAttrName );
void SetColumnar ( const columnar::Columnar_i * pColumnar ) override;
protected:
CSphString m_sAttrName;
int m_iColumnarCol = -1;
const columnar::Columnar_i * m_pColumnar = nullptr;
std::unique_ptr<columnar::Iterator_i> m_pIterator;
inline ByteBlob_t GetValue ( RowID_t tRowID ) const;
};
ColumnarFilter_c::ColumnarFilter_c ( const CSphString & sAttrName )
: m_sAttrName ( sAttrName )
{}
void ColumnarFilter_c::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
m_pColumnar = pColumnar;
if ( !pColumnar ) // this can happen on RT columnar setup, when we have the filters but each chunk has its own columnar storage
{
m_pIterator.reset();
return;
}
std::string sError; // fixme! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_sAttrName.cstr(), sError );
columnar::AttrInfo_t tAttrInfo;
if ( pColumnar->GetAttrInfo ( m_sAttrName.cstr(), tAttrInfo ) )
m_iColumnarCol = tAttrInfo.m_iId;
}
ByteBlob_t ColumnarFilter_c::GetValue ( RowID_t tRowID ) const
{
ByteBlob_t tData;
tData.second = m_pIterator->Get ( tRowID, tData.first );
return tData;
}
//////////////////////////////////////////////////////////////////////////
// direct access to columnar storage to avoid expression overhead
class Filter_SingleValueColumnar_c : public ColumnarFilter_c
{
using ColumnarFilter_c::ColumnarFilter_c;
public:
void SetValues ( const VecTraits_T<SphAttr_t>& tValues ) final;
bool Eval ( const CSphMatch & tMatch ) const override { return m_pIterator->Get ( tMatch.m_tRowID )==m_tRefValue; }
bool Test ( const columnar::MinMaxVec_t & dMinMax ) const final;
protected:
SphAttr_t m_tRefValue;
};
void Filter_SingleValueColumnar_c::SetValues ( const VecTraits_T<SphAttr_t>& tValues )
{
assert ( tValues.GetLength()==1 );
m_tRefValue = tValues[0];
}
bool Filter_SingleValueColumnar_c::Test ( const columnar::MinMaxVec_t & dMinMax ) const
{
if ( m_iColumnarCol<0 )
return true;
return ( dMinMax[m_iColumnarCol].first<=m_tRefValue && m_tRefValue<=dMinMax[m_iColumnarCol].second );
}
//////////////////////////////////////////////////////////////////////////
class Filter_ValuesColumnar_c : public ColumnarFilter_c
{
using ColumnarFilter_c::ColumnarFilter_c;
public:
bool Eval ( const CSphMatch & tMatch ) const final;
bool Test ( const columnar::MinMaxVec_t & dMinMax ) const final;
void SetValues ( const VecTraits_T<SphAttr_t>& tValues ) final;
private:
VecTraits_T<const SphAttr_t> m_dValues;
bool m_bDegenerate = false;
bool (Filter_ValuesColumnar_c::*m_fnEval)( SphAttr_t tValue ) const = nullptr;
bool (Filter_ValuesColumnar_c::*m_fnEvalBlock)( SphAttr_t tMin, SphAttr_t tMax ) const = nullptr;
bool EvalLinear ( SphAttr_t tValue ) const;
bool EvalBinary ( SphAttr_t tValue ) const;
bool EvalBlockLinear ( SphAttr_t uMin, SphAttr_t uMax ) const;
bool EvalBlockBinary ( SphAttr_t uMin, SphAttr_t uMax ) const;
bool IsDegenerate() const;
};
bool Filter_ValuesColumnar_c::Eval ( const CSphMatch & tMatch ) const
{
if ( m_bDegenerate )
return true;
return (*this.*m_fnEval)( m_pIterator->Get ( tMatch.m_tRowID ) );
}
bool Filter_ValuesColumnar_c::Test ( const columnar::MinMaxVec_t & dMinMax ) const
{
if ( m_iColumnarCol<0 || m_bDegenerate )
return true;
return (*this.*m_fnEvalBlock)( dMinMax[m_iColumnarCol].first, dMinMax[m_iColumnarCol].second );
}
void Filter_ValuesColumnar_c::SetValues ( const VecTraits_T<SphAttr_t>& tValues )
{
assert ( !tValues.IsEmpty() );
#ifndef NDEBUG
for ( int i = 1; i < tValues.GetLength(); ++i )
assert ( tValues[i - 1] <= tValues[i] );
#endif
m_dValues = tValues;
const int SEARCH_THRESH=128;
if ( tValues.GetLength()<SEARCH_THRESH )
{
m_fnEval = &Filter_ValuesColumnar_c::EvalLinear;
m_fnEvalBlock = &Filter_ValuesColumnar_c::EvalBlockLinear;
}
else
{
m_fnEval = &Filter_ValuesColumnar_c::EvalBinary;
m_fnEvalBlock = &Filter_ValuesColumnar_c::EvalBlockBinary;
}
m_bDegenerate = IsDegenerate();
}
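// The dispatch above is a plain size cutoff: for short IN() lists a linear
// scan usually beats binary search (better branch prediction, no log-factor
// overhead), so anything under SEARCH_THRESH=128 values stays linear.
// Illustrative effect: IN(1,2,3) evaluates via EvalLinear, while a list of
// several hundred ids evaluates via EvalBinary over the pre-sorted values
// (sortedness is asserted in debug builds above).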
bool Filter_ValuesColumnar_c::EvalLinear ( SphAttr_t tValue ) const
{
for ( auto i : m_dValues )
if ( i==tValue )
return true;
return false;
}
bool Filter_ValuesColumnar_c::EvalBinary ( SphAttr_t tValue ) const
{
return !!m_dValues.BinarySearch(tValue);
}
bool Filter_ValuesColumnar_c::EvalBlockLinear ( SphAttr_t uMin, SphAttr_t uMax ) const
{
for ( auto i : m_dValues )
if ( uMin<=i && i<=uMax )
return true;
return false;
}
bool Filter_ValuesColumnar_c::EvalBlockBinary ( SphAttr_t uMin, SphAttr_t uMax ) const
{
// find the first value greater than or equal to uMin
const SphAttr_t * pFound = sphBinarySearchFirst ( m_dValues.Begin(), m_dValues.End()-1, SphIdentityFunctor_T<SphAttr_t>(), uMin );
if ( *pFound<=uMax )
return true;
return false;
}
bool Filter_ValuesColumnar_c::IsDegenerate() const
{
if ( !m_pColumnar )
return false;
common::Filter_t tFilter;
tFilter.m_sName = m_sAttrName.cstr();
tFilter.m_eType = common::FilterType_e::VALUES;
int iNumValues = m_dValues.GetLength();
tFilter.m_dValues.resize(iNumValues);
if ( iNumValues )
memcpy ( &tFilter.m_dValues[0], m_dValues.Begin(), iNumValues*sizeof ( m_dValues[0] ) );
return m_pColumnar->IsFilterDegenerate(tFilter);
}
//////////////////////////////////////////////////////////////////////////
template <bool MULTI>
class Filter_StringColumnar_T : public ColumnarFilter_c
{
public:
Filter_StringColumnar_T ( const CSphString & sAttrName, ESphCollation eCollation, bool bEquals );
void SetRefString ( const CSphString * pRef, int iCount ) final;
bool Eval ( const CSphMatch & tMatch ) const final;
bool Test ( const columnar::MinMaxVec_t & dMinMax ) const final;
void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
bool CanExclude() const final { return true; }
protected:
int m_iMinLength = 0;
int m_iMaxLength = 0;
CSphVector<uint64_t> m_dHashes;
bool m_bHasHashes = false;
ESphCollation m_eCollation = SPH_COLLATION_DEFAULT;
StrHashCalc_fn m_fnHashCalc = nullptr;
bool m_bEquals = true;
uint64_t GetStringHash ( RowID_t tRowID ) const;
};
template <bool MULTI>
Filter_StringColumnar_T<MULTI>::Filter_StringColumnar_T ( const CSphString & sAttrName, ESphCollation eCollation, bool bEquals )
: ColumnarFilter_c ( sAttrName )
, m_eCollation ( eCollation )
, m_fnHashCalc ( GetStringHashCalcFunc(eCollation) )
, m_bEquals ( bEquals )
{}
template <bool MULTI>
void Filter_StringColumnar_T<MULTI>::SetRefString ( const CSphString * pRef, int iCount )
{
if_const ( !MULTI )
assert ( iCount<=1 );
m_iMinLength = m_iMaxLength = 0;
for ( int i = 0; i < iCount; i++ )
{
const CSphString & sRef = pRef[i];
int iLength = sRef.Length();
m_iMinLength = Min ( iLength, m_iMinLength );
m_iMaxLength = Max ( iLength, m_iMaxLength );
m_dHashes.Add ( iLength ? m_fnHashCalc ( (const BYTE*)sRef.cstr(), iLength, SPH_FNV64_SEED ) : 0 );
}
}
template <bool MULTI>
bool Filter_StringColumnar_T<MULTI>::Eval ( const CSphMatch & tMatch ) const
{
uint64_t uHash = GetStringHash ( tMatch.m_tRowID );
if_const ( !MULTI )
return ( m_dHashes[0]==uHash ) ^ (!m_bEquals);
for ( auto i : m_dHashes )
if ( i==uHash )
return true ^ (!m_bEquals);
return false ^ (!m_bEquals);
}
template <bool MULTI>
bool Filter_StringColumnar_T<MULTI>::Test ( const columnar::MinMaxVec_t & dMinMax ) const
{
if ( m_iColumnarCol<0 )
return true;
int64_t iMin = dMinMax[m_iColumnarCol].first;
int64_t iMax = dMinMax[m_iColumnarCol].second;
if ( m_bEquals )
return m_iMaxLength>=iMin && m_iMinLength<=iMax;
// reject the case when all strings are empty and we request non-empty strings
if ( !iMin && !iMax && !m_iMinLength && !m_iMaxLength )
return false;
return true;
}
template <bool MULTI>
void Filter_StringColumnar_T<MULTI>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
if ( !pColumnar )
{
m_pIterator.reset();
return;
}
columnar::IteratorHints_t tHints;
columnar::IteratorCapabilities_t tCapabilities;
tHints.m_bNeedStringHashes = m_eCollation==SPH_COLLATION_DEFAULT;
std::string sError; // fixme! report errors
m_pIterator = CreateColumnarIterator( pColumnar, m_sAttrName.cstr(), sError, tHints, &tCapabilities );
m_bHasHashes = m_pIterator && tCapabilities.m_bStringHashes;
}
template <bool MULTI>
uint64_t Filter_StringColumnar_T<MULTI>::GetStringHash ( RowID_t tRowID ) const
{
if ( m_bHasHashes )
return m_pIterator->Get(tRowID);
const BYTE * pStr = nullptr;
int iLen = m_pIterator->Get ( tRowID, pStr );
if ( !iLen )
return 0;
return m_fnHashCalc ( pStr, iLen, SPH_FNV64_SEED );
}
//////////////////////////////////////////////////////////////////////////
class FilterStringCmpColumnar_c : public ColumnarFilter_c
{
public:
FilterStringCmpColumnar_c ( const CSphString & sAttrName, ESphCollation eCollation, bool bExclude, EStrCmpDir eStrCmpDir );
bool Eval ( const CSphMatch & tMatch ) const final;
void SetRefString ( const CSphString * pRef, int iCount ) final;
void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
bool CanExclude() const final { return true; }
private:
SphStringCmp_fn m_fnStrCmp;
bool m_bExclude = false;
EStrCmpDir m_eStrCmpDir;
CSphFixedVector<BYTE> m_dVal { 0 };
};
FilterStringCmpColumnar_c::FilterStringCmpColumnar_c ( const CSphString & sAttrName, ESphCollation eCollation, bool bExclude, EStrCmpDir eStrCmpDir )
: ColumnarFilter_c ( sAttrName )
, m_fnStrCmp { GetStringCmpFunc ( eCollation ) }
, m_bExclude ( bExclude )
, m_eStrCmpDir ( eStrCmpDir )
{
}
bool FilterStringCmpColumnar_c::Eval ( const CSphMatch & tMatch ) const
{
const BYTE * pStr = nullptr;
int iLen = m_pIterator->Get ( tMatch.m_tRowID, pStr );
int iCmpResult = m_fnStrCmp ( { pStr, iLen }, m_dVal, false );
switch ( m_eStrCmpDir )
{
case EStrCmpDir::LT: return ( m_bExclude ? iCmpResult>=0 : iCmpResult<0 );
case EStrCmpDir::GT: return ( m_bExclude ? iCmpResult<=0 : iCmpResult>0 );
case EStrCmpDir::EQ:
default:
assert (false && "unexpected: EStrCmpDir::EQ should not be here!");
return false;
}
}
void FilterStringCmpColumnar_c::SetRefString ( const CSphString * pRef, int iCount )
{
assert ( iCount<2 );
const char * sVal = ( pRef ? pRef->cstr() : nullptr );
int iLen = ( pRef ? pRef->Length() : 0 );
m_dVal.Reset ( iLen );
memcpy ( m_dVal.Begin(), sVal, iLen );
}
void FilterStringCmpColumnar_c::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
if ( !pColumnar )
{
m_pIterator.reset();
return;
}
columnar::IteratorHints_t tHints;
tHints.m_bNeedStringHashes = false;
std::string sError; // fixme! report errors
m_pIterator = CreateColumnarIterator( pColumnar, m_sAttrName.cstr(), sError, tHints, nullptr );
}
//////////////////////////////////////////////////////////////////////////
template <typename T, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
class Filter_RangeColumnar_T : public ColumnarFilter_c
{
using ColumnarFilter_c::ColumnarFilter_c;
public:
bool Eval ( const CSphMatch & tMatch ) const final;
bool Test ( const columnar::MinMaxVec_t & dMinMax ) const final;
void SetRange ( SphAttr_t tMin, SphAttr_t tMax ) final;
void SetRangeFloat ( float fMin, float fMax ) final;
private:
T m_tMinValue;
T m_tMaxValue;
};
template <typename T, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
bool Filter_RangeColumnar_T<T, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::Eval ( const CSphMatch & tMatch ) const
{
SphAttr_t tValue = m_pIterator->Get ( tMatch.m_tRowID );
return EvalRange<HAS_EQUAL_MIN,HAS_EQUAL_MAX,OPEN_LEFT,OPEN_RIGHT> ( ConvertType<T>(tValue), m_tMinValue, m_tMaxValue );
}
template <typename T, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
bool Filter_RangeColumnar_T<T, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::Test ( const columnar::MinMaxVec_t & dMinMax ) const
{
if ( m_iColumnarCol<0 )
return true;
T tBlockMin = ConvertType<T> ( dMinMax[m_iColumnarCol].first );
T tBlockMax = ConvertType<T> ( dMinMax[m_iColumnarCol].second );
return EvalBlockRangeAny<HAS_EQUAL_MIN,HAS_EQUAL_MAX> ( tBlockMin, tBlockMax, m_tMinValue, m_tMaxValue );
}
template <typename T, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
void Filter_RangeColumnar_T<T, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::SetRange ( SphAttr_t tMin, SphAttr_t tMax )
{
m_tMinValue = ConvertType<T>(tMin);
m_tMaxValue = ConvertType<T>(tMax);
}
template <typename T, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
void Filter_RangeColumnar_T<T, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::SetRangeFloat ( float fMin, float fMax )
{
m_tMinValue = (T)fMin;
m_tMaxValue = (T)fMax;
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static std::unique_ptr<ISphFilter> CreateColumnarRangeFilter ( const CSphString & sName, const CSphFilterSettings & tSettings )
{
int iIndex = tSettings.m_bHasEqualMin*8 + tSettings.m_bHasEqualMax*4 + tSettings.m_bOpenLeft*2 + tSettings.m_bOpenRight;
switch ( iIndex )
{
case 0: return std::make_unique<Filter_RangeColumnar_T<T, false, false, false, false>> (sName);
case 1: return std::make_unique<Filter_RangeColumnar_T<T, false, false, false, true>> (sName);
case 2: return std::make_unique<Filter_RangeColumnar_T<T, false, false, true, false>> (sName);
case 3: return std::make_unique<Filter_RangeColumnar_T<T, false, false, true, true>> (sName);
case 4: return std::make_unique<Filter_RangeColumnar_T<T, false, true, false, false>> (sName);
case 5: return std::make_unique<Filter_RangeColumnar_T<T, false, true, false, true>> (sName);
case 6: return std::make_unique<Filter_RangeColumnar_T<T, false, true, true, false>> (sName);
case 7: return std::make_unique<Filter_RangeColumnar_T<T, false, true, true, true>> (sName);
case 8: return std::make_unique<Filter_RangeColumnar_T<T, true, false, false, false>> (sName);
case 9: return std::make_unique<Filter_RangeColumnar_T<T, true, false, false, true>> (sName);
case 10: return std::make_unique<Filter_RangeColumnar_T<T, true, false, true, false>> (sName);
case 11: return std::make_unique<Filter_RangeColumnar_T<T, true, false, true, true>> (sName);
case 12: return std::make_unique<Filter_RangeColumnar_T<T, true, true, false, false>> (sName);
case 13: return std::make_unique<Filter_RangeColumnar_T<T, true, true, false, true>> (sName);
case 14: return std::make_unique<Filter_RangeColumnar_T<T, true, true, true, false>> (sName);
case 15: return std::make_unique<Filter_RangeColumnar_T<T, true, true, true, true>> (sName);
default: return nullptr;
}
}
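// The 4-bit dispatch index encodes the range shape as
// hasEqualMin*8 + hasEqualMax*4 + openLeft*2 + openRight. Worked example:
// the condition "attr >= 5 AND attr < 10" has an inclusive minimum and an
// exclusive maximum with both bounds present, so iIndex = 8 and the
// instantiated filter is Filter_RangeColumnar_T<T, true, false, false, false>.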
template < typename T, typename FUNC >
class Filter_SingleValueColumnar_MVA_T : public Filter_SingleValueColumnar_c
{
using Filter_SingleValueColumnar_c::Filter_SingleValueColumnar_c;
public:
bool Eval ( const CSphMatch & tMatch ) const final
{
ByteBlob_t tData = GetValue ( tMatch.m_tRowID );
VecTraits_T<const T> tCheck ( (const T*)tData.first, tData.second/sizeof(T) );
return FUNC::Eval ( tCheck, m_tRefValue );
}
};
//////////////////////////////////////////////////////////////////////////
template < typename T, typename FUNC >
class Filter_ValuesColumnar_MVA_T : public ColumnarFilter_c
{
using ColumnarFilter_c::ColumnarFilter_c;
public:
bool Eval ( const CSphMatch & tMatch ) const final;
bool Test ( const columnar::MinMaxVec_t & dMinMax ) const final;
void SetValues ( const VecTraits_T<SphAttr_t>& tValues ) final;
private:
VecTraits_T<const SphAttr_t> m_dValues;
};
template < typename T, typename FUNC >
bool Filter_ValuesColumnar_MVA_T<T,FUNC>::Eval ( const CSphMatch & tMatch ) const
{
ByteBlob_t tData = GetValue ( tMatch.m_tRowID );
VecTraits_T<const T> tCheck ( (const T*)tData.first, tData.second/sizeof(T) );
return FUNC::Eval ( tCheck, m_dValues );
}
template < typename T, typename FUNC >
bool Filter_ValuesColumnar_MVA_T<T,FUNC>::Test ( const columnar::MinMaxVec_t & dMinMax ) const
{
if ( m_iColumnarCol<0 )
return true;
return FUNC::EvalBlock ( m_dValues, dMinMax[m_iColumnarCol].first, dMinMax[m_iColumnarCol].second );
}
template < typename T, typename FUNC >
void Filter_ValuesColumnar_MVA_T<T,FUNC>::SetValues ( const VecTraits_T<SphAttr_t>& tValues )
{
assert ( !tValues.IsEmpty() );
#ifndef NDEBUG
for ( int i = 1; i < tValues.GetLength(); ++i )
assert ( tValues[i - 1] <= tValues[i] );
#endif
m_dValues = tValues;
}
//////////////////////////////////////////////////////////////////////////
template <typename T, typename FUNC, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
class Filter_RangeColumnar_MVA_T : public ColumnarFilter_c
{
using ColumnarFilter_c::ColumnarFilter_c;
public:
bool Eval ( const CSphMatch & tMatch ) const final;
bool Test ( const columnar::MinMaxVec_t & dMinMax ) const final;
void SetRange ( SphAttr_t tMin, SphAttr_t tMax ) final;
private:
SphAttr_t m_tMinValue;
SphAttr_t m_tMaxValue;
};
template <typename T, typename FUNC, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
bool Filter_RangeColumnar_MVA_T<T, FUNC, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::Eval ( const CSphMatch & tMatch ) const
{
ByteBlob_t tData = GetValue ( tMatch.m_tRowID );
VecTraits_T<const T> tCheck ( (const T*)tData.first, tData.second/sizeof(T) );
return FUNC::template EvalRange<T,HAS_EQUAL_MIN,HAS_EQUAL_MAX> ( tCheck, m_tMinValue, m_tMaxValue );
}
template <typename T, typename FUNC, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
bool Filter_RangeColumnar_MVA_T<T, FUNC, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::Test ( const columnar::MinMaxVec_t & dMinMax ) const
{
if ( m_iColumnarCol<0 )
return true;
return EvalBlockRangeAny<HAS_EQUAL_MIN,HAS_EQUAL_MAX> ( dMinMax[m_iColumnarCol].first, dMinMax[m_iColumnarCol].second, m_tMinValue, m_tMaxValue );
}
template <typename T, typename FUNC, bool HAS_EQUAL_MIN, bool HAS_EQUAL_MAX, bool OPEN_LEFT, bool OPEN_RIGHT>
void Filter_RangeColumnar_MVA_T<T, FUNC, HAS_EQUAL_MIN, HAS_EQUAL_MAX, OPEN_LEFT, OPEN_RIGHT>::SetRange ( SphAttr_t tMin, SphAttr_t tMax )
{
m_tMinValue = tMin;
m_tMaxValue = tMax;
}
//////////////////////////////////////////////////////////////////////////
class Filter_NullColumnar_c : public ISphFilter
{
public:
bool Eval ( const CSphMatch & tMatch ) const final { return true; }
};
//////////////////////////////////////////////////////////////////////////
template < typename T, typename FUNC>
static std::unique_ptr<ISphFilter> CreateColumnarMvaFilterValues ( const CSphString & sName, const CSphFilterSettings & tSettings )
{
if ( tSettings.GetNumValues()==1 )
return std::make_unique<Filter_SingleValueColumnar_MVA_T<T,FUNC>>(sName);
return std::make_unique<Filter_ValuesColumnar_MVA_T<T,FUNC>>(sName);
}
template < typename T, typename FUNC>
static std::unique_ptr<ISphFilter> CreateColumnarMvaRangeFilter ( const CSphString & sName, const CSphFilterSettings & tSettings )
{
int iIndex = tSettings.m_bHasEqualMin*8 + tSettings.m_bHasEqualMax*4 + tSettings.m_bOpenLeft*2 + tSettings.m_bOpenRight;
switch ( iIndex )
{
case 0: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, false, false, false>> (sName);
case 1: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, false, false, true>> (sName);
case 2: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, false, true, false>> (sName);
case 3: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, false, true, true>> (sName);
case 4: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, true, false, false>> (sName);
case 5: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, true, false, true>> (sName);
case 6: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, true, true, false>> (sName);
case 7: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, false, true, true, true>> (sName);
case 8: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, false, false, false>> (sName);
case 9: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, false, false, true>> (sName);
case 10: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, false, true, false>> (sName);
case 11: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, false, true, true>> (sName);
case 12: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, true, false, false>> (sName);
case 13: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, true, false, true>> (sName);
case 14: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, true, true, false>> (sName);
case 15: return std::make_unique<Filter_RangeColumnar_MVA_T<T, FUNC, true, true, true, true>> (sName);
default: return nullptr;
}
}
static CSphString GetAttributeName ( int iAttr, const ISphSchema & tSchema )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(iAttr);
// if it is a columnar expression working over an aliased attribute, fetch that attribute name
if ( tAttr.IsColumnarExpr() || tAttr.IsStoredExpr() )
{
CSphString sAliasedCol;
tAttr.m_pExpr->Command ( SPH_EXPR_GET_COLUMNAR_COL, &sAliasedCol );
return sAliasedCol;
}
return tAttr.m_sName;
}
static std::unique_ptr<ISphFilter> CreateColumnarFilterMVA ( int iAttr, const ISphSchema & tSchema, const CSphFilterSettings & tSettings, const CommonFilterSettings_t & tFixedSettings )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(iAttr);
CSphString sAttrName = GetAttributeName ( iAttr, tSchema );
bool bWide = tAttr.m_eAttrType==SPH_ATTR_INT64SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET_PTR;
bool bRange = tFixedSettings.m_eType==SPH_FILTER_RANGE;
bool bAll = tSettings.m_eMvaFunc==SPH_MVAFUNC_ALL;
int iIndex = bWide*4 + bRange*2 + bAll;
switch ( iIndex )
{
case 0: return CreateColumnarMvaFilterValues<uint32_t,MvaEvalAny_c> ( sAttrName, tSettings );
case 1: return CreateColumnarMvaFilterValues<uint32_t,MvaEvalAll_c> ( sAttrName, tSettings );
case 2: return CreateColumnarMvaRangeFilter<uint32_t,MvaEvalAny_c> ( sAttrName, tSettings );
case 3: return CreateColumnarMvaRangeFilter<uint32_t,MvaEvalAll_c> ( sAttrName, tSettings );
case 4: return CreateColumnarMvaFilterValues<int64_t,MvaEvalAny_c> ( sAttrName, tSettings );
case 5: return CreateColumnarMvaFilterValues<int64_t,MvaEvalAll_c> ( sAttrName, tSettings );
case 6: return CreateColumnarMvaRangeFilter<int64_t,MvaEvalAny_c> ( sAttrName, tSettings );
case 7: return CreateColumnarMvaRangeFilter<int64_t,MvaEvalAll_c> ( sAttrName, tSettings );
default:
assert ( 0 && "Unsupported MVA filter type" );
}
return nullptr;
}
static std::unique_ptr<ISphFilter> CreateColumnarFilterPlain ( int iAttr, const ISphSchema & tSchema, const CSphFilterSettings & tSettings, const CommonFilterSettings_t & tFixedSettings, ESphCollation eCollation )
{
CSphString sAttrName = GetAttributeName ( iAttr, tSchema );
switch ( tFixedSettings.m_eType )
{
case SPH_FILTER_VALUES:
{
if ( tSettings.GetNumValues()==1 )
return std::make_unique<Filter_SingleValueColumnar_c> ( sAttrName );
else
return std::make_unique<Filter_ValuesColumnar_c> ( sAttrName );
}
break;
case SPH_FILTER_RANGE: return CreateColumnarRangeFilter<SphAttr_t> ( sAttrName, tSettings );
case SPH_FILTER_FLOATRANGE: return CreateColumnarRangeFilter<float> ( sAttrName, tSettings );
case SPH_FILTER_STRING:
if ( tSettings.m_eStrCmpDir==EStrCmpDir::EQ )
return std::make_unique<Filter_StringColumnar_T<false>> ( sAttrName, eCollation, !tSettings.m_bExclude );
else
return std::make_unique<FilterStringCmpColumnar_c> ( sAttrName, eCollation, tSettings.m_bExclude, tSettings.m_eStrCmpDir );
case SPH_FILTER_STRING_LIST:return std::make_unique<Filter_StringColumnar_T<true>> ( sAttrName, eCollation, !tSettings.m_bExclude );
default:
assert ( 0 && "Unhandled columnar filter type" );
break;
}
return nullptr;
}
std::unique_ptr<ISphFilter> TryToCreateColumnarFilter ( int iAttr, const ISphSchema & tSchema, const CSphFilterSettings & tSettings, const CommonFilterSettings_t & tFixedSettings, ESphCollation eCollation, CSphString & sError, CSphString & sWarning )
{
if ( iAttr<0 )
return nullptr;
const CSphColumnInfo & tAttr = tSchema.GetAttr(iAttr);
if ( !tAttr.IsColumnar() && !tAttr.IsColumnarExpr() && !tAttr.IsStoredExpr() )
return nullptr;
// when we created a columnar expression, we removed it from PREFILTER stage
// that means that we have to create a specialized filter here because a generic expression filter will no longer work
bool bFound = false;
static const ESphAttr dSupportedTypes[] = { SPH_ATTR_INTEGER, SPH_ATTR_BIGINT, SPH_ATTR_TIMESTAMP, SPH_ATTR_BOOL, SPH_ATTR_FLOAT, SPH_ATTR_STRING, SPH_ATTR_STRINGPTR, SPH_ATTR_UINT32SET, SPH_ATTR_UINT32SET_PTR, SPH_ATTR_INT64SET, SPH_ATTR_INT64SET_PTR };
for ( auto i : dSupportedTypes )
bFound |= tAttr.m_eAttrType==i;
if ( !bFound )
{
assert ( 0 && "Unhandled columnar filter type" );
return nullptr;
}
if ( tFixedSettings.m_eType==SPH_FILTER_NULL )
return std::make_unique<Filter_NullColumnar_c>();
if ( IsMvaAttr(tAttr.m_eAttrType) )
{
if ( tFixedSettings.m_eType!=SPH_FILTER_VALUES && tFixedSettings.m_eType!=SPH_FILTER_RANGE )
{
sError.SetSprintf ( "unsupported filter type '%s' on MVA column", FilterType2Str ( tFixedSettings.m_eType ).cstr() );
return nullptr;
}
if ( tSettings.m_eMvaFunc==SPH_MVAFUNC_NONE )
sWarning.SetSprintf ( "use an explicit ANY()/ALL() around a filter on MVA column" );
return CreateColumnarFilterMVA ( iAttr, tSchema, tSettings, tFixedSettings );
}
return CreateColumnarFilterPlain ( iAttr, tSchema, tSettings, tFixedSettings, eCollation );
}
static common::FilterType_e ToColumnarFilterType ( ESphFilter eType )
{
switch ( eType )
{
case SPH_FILTER_VALUES: return common::FilterType_e::VALUES;
case SPH_FILTER_RANGE: return common::FilterType_e::RANGE;
case SPH_FILTER_FLOATRANGE: return common::FilterType_e::FLOATRANGE;
case SPH_FILTER_STRING:
case SPH_FILTER_STRING_LIST:return common::FilterType_e::STRINGS;
case SPH_FILTER_NULL: return common::FilterType_e::NOTNULL;
default: return common::FilterType_e::NONE;
}
}
static common::MvaAggr_e ToColumnarAggr ( ESphMvaFunc eAggr )
{
switch ( eAggr )
{
case SPH_MVAFUNC_ANY: return common::MvaAggr_e::ANY;
case SPH_MVAFUNC_ALL: return common::MvaAggr_e::ALL;
default: return common::MvaAggr_e::NONE;
}
}
bool ToColumnarFilter ( common::Filter_t & tFilter, const CSphFilterSettings & tSrc, ESphCollation eCollation, const ISphSchema & tSchema, CSphString & sWarning )
{
tFilter.m_eType = ToColumnarFilterType ( tSrc.m_eType );
if ( tFilter.m_eType==common::FilterType_e::NONE )
return false;
tFilter.m_sName = tSrc.m_sAttrName.cstr();
tFilter.m_bExclude = tFilter.m_eType==common::FilterType_e::NOTNULL ? tSrc.m_bIsNull : tSrc.m_bExclude;
tFilter.m_eMvaAggr = ToColumnarAggr ( tSrc.m_eMvaFunc );
tFilter.m_iMinValue = tSrc.m_iMinValue;
tFilter.m_iMaxValue = tSrc.m_iMaxValue;
tFilter.m_fMinValue = tSrc.m_fMinValue;
tFilter.m_fMaxValue = tSrc.m_fMaxValue;
tFilter.m_bLeftUnbounded = tSrc.m_bOpenLeft;
tFilter.m_bRightUnbounded = tSrc.m_bOpenRight;
tFilter.m_bLeftClosed = tSrc.m_bHasEqualMin;
tFilter.m_bRightClosed = tSrc.m_bHasEqualMax;
auto& tValues = tSrc.GetValues();
tFilter.m_dValues.resize(tValues.GetLength());
if ( !tValues.IsEmpty() )
memcpy ( tFilter.m_dValues.data(), tValues.begin(), tValues.GetLengthBytes() );
int iNumStrValues = tSrc.m_dStrings.GetLength();
tFilter.m_dStringValues.resize(iNumStrValues);
for ( int i = 0; i < iNumStrValues; i++ )
{
auto & dDstStr = tFilter.m_dStringValues[i];
dDstStr.resize ( tSrc.m_dStrings[i].Length() );
memcpy ( dDstStr.data(), tSrc.m_dStrings[i].cstr(), dDstStr.size() );
}
const CSphColumnInfo * pAttr = tSchema.GetAttr ( tSrc.m_sAttrName.cstr() );
if ( pAttr && IsMvaAttr ( pAttr->m_eAttrType ) && ( tFilter.m_eMvaAggr==common::MvaAggr_e::NONE ) )
{
sWarning = "use an explicit ANY()/ALL() around a filter on MVA column";
tFilter.m_eMvaAggr = common::MvaAggr_e::ANY;
}
// FIXME! add support for arbitrary collations in columnar storage
tFilter.m_fnCalcStrHash = eCollation==SPH_COLLATION_DEFAULT ? LibcCIHash_fn::Hash : nullptr;
tFilter.m_fnStrCmp = GetStringCmpFunc(eCollation);
return true;
}
bool AddColumnarFilter ( std::vector<common::Filter_t> & dDst, const CSphFilterSettings & tSrc, ESphCollation eCollation, const ISphSchema & tSchema, CSphString & sWarning )
{
common::Filter_t tFilter;
if ( !ToColumnarFilter ( tFilter, tSrc, eCollation, tSchema, sWarning ) )
return false;
dDst.emplace_back ( std::move(tFilter) );
return true;
}
void ToColumnarFilters ( VecTraits_T<const CSphFilterSettings> & dFilters, std::vector<common::Filter_t> & dColumnarFilters, std::vector<int> & dFilterMap, const ISphSchema & tSchema, ESphCollation eCollation, CSphString & sWarning )
{
dFilterMap.resize ( dFilters.GetLength() );
ARRAY_FOREACH ( i, dFilters )
{
dFilterMap[i] = -1;
const CSphColumnInfo * pCol = tSchema.GetAttr ( dFilters[i].m_sAttrName.cstr() );
bool bColumnarFilter = pCol && ( pCol->IsColumnar() || pCol->IsColumnarExpr() || pCol->IsStoredExpr() );
bool bRowIdFilter = dFilters[i].m_sAttrName=="@rowid";
if ( ( bColumnarFilter || bRowIdFilter ) && AddColumnarFilter ( dColumnarFilters, dFilters[i], eCollation, tSchema, sWarning ) )
dFilterMap[i] = (int)dColumnarFilters.size()-1;
}
}
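// dFilterMap parallels dFilters: dFilterMap[i] holds the position of filter
// i inside dColumnarFilters, or -1 if that filter stayed a regular row-wise
// filter. E.g. for filters over {columnar attr, plain attr, @rowid} the map
// would come out as {0, -1, 1} (illustrative).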
| 30,328 | C++ | .cpp | 678 | 42.650442 | 255 | 0.723097 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,838 | searchdsql.cpp | manticoresoftware_manticoresearch/src/searchdsql.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdsql.h"
#include "sphinxint.h"
#include "sphinxplugin.h"
#include "searchdaemon.h"
#include "searchdddl.h"
#include "sphinxql_debug.h"
#include "sphinxql_second.h"
#include "sphinxql_extra.h"
#include "searchdha.h"
#include "jieba.h"
extern int g_iAgentQueryTimeoutMs; // global (default). May be overridden by index-scope values, if specified
void SqlNode_t::SetValueInt ( int64_t iValue )
{
m_uValue = abs(iValue);
m_bNegative = iValue<0;
m_fValue = (float)iValue;
}
void SqlNode_t::SetValueInt ( uint64_t uValue, bool bNegative )
{
m_uValue = uValue;
m_bNegative = bNegative;
m_fValue = bNegative ? -float(uValue) : float(uValue);
}
void SqlNode_t::SetValueFloat ( float fValue )
{
m_fValue = fValue;
m_uValue = abs((int64_t)fValue);
m_bNegative = fValue<0;
}
int64_t SqlNode_t::GetValueInt() const
{
if ( m_bNegative )
{
if ( m_uValue > (uint64_t)LLONG_MAX )
return LLONG_MIN;
return -int64_t(m_uValue);
}
else
{
if ( m_uValue > (uint64_t)LLONG_MAX )
return LLONG_MAX;
return int64_t(m_uValue);
}
}
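// The sign/magnitude pair saturates instead of overflowing. Examples:
// magnitude 2^63 with m_bNegative=true is exactly representable and returns
// LLONG_MIN; magnitude 2^63 with m_bNegative=false exceeds LLONG_MAX and is
// clamped to LLONG_MAX.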
uint64_t SqlNode_t::GetValueUint() const
{
assert ( !m_bNegative );
return m_uValue;
}
void SqlNode_t::CopyValueInt ( const SqlNode_t & tRhs )
{
m_uValue = tRhs.m_uValue;
m_bNegative = tRhs.m_bNegative;
}
/////////////////////////////////////////////////////////////////////
void SqlInsert_t::SetValueInt ( uint64_t uValue, bool bNegative )
{
m_uValue = uValue;
m_bNegative = bNegative;
}
void SqlInsert_t::SetValueInt ( int64_t iValue )
{
m_uValue = abs(iValue);
m_bNegative = iValue<0;
}
int64_t SqlInsert_t::GetValueInt() const
{
if ( m_bNegative )
{
if ( m_uValue > (uint64_t)LLONG_MAX )
return LLONG_MIN;
return -int64_t(m_uValue);
}
else
{
if ( m_uValue > (uint64_t)LLONG_MAX )
return LLONG_MAX;
return int64_t(m_uValue);
}
}
uint64_t SqlInsert_t::GetValueUint() const
{
assert ( !m_bNegative );
return m_uValue;
}
void SqlInsert_t::CopyValueInt ( const SqlNode_t & tRhs )
{
m_uValue = tRhs.m_uValue;
m_bNegative = tRhs.m_bNegative;
}
/////////////////////////////////////////////////////////////////////
SqlStmt_t::SqlStmt_t()
{
m_tQuery.m_eMode = SPH_MATCH_EXTENDED2; // only new and shiny matching and sorting
m_tQuery.m_eSort = SPH_SORT_EXTENDED;
m_tQuery.m_sSortBy = "@weight desc"; // default order
m_tQuery.m_sOrderBy = "@weight desc";
m_tQuery.m_iAgentQueryTimeoutMs = DEFAULT_QUERY_TIMEOUT;
m_tQuery.m_iRetryCount = DEFAULT_QUERY_RETRY;
m_tQuery.m_iRetryDelay = DEFAULT_QUERY_RETRY;
}
SqlStmt_t::~SqlStmt_t() = default;
bool SqlStmt_t::AddSchemaItem ( const char * psName )
{
m_dInsertSchema.Add ( psName );
CSphString & sAttr = m_dInsertSchema.Last();
sAttr.ToLower();
int iLen = sAttr.Length();
if ( iLen>1 && sAttr.cstr()[0] == '`' && sAttr.cstr()[iLen-1]=='`' )
sAttr = sAttr.SubString ( 1, iLen-2 );
m_iSchemaSz = m_dInsertSchema.GetLength();
return true; // stub; check if the given field actually exists in the schema
}
// check if the number of fields which would be inserted is in accordance to the given schema
bool SqlStmt_t::CheckInsertIntegrity()
{
// cheat: if no schema is assigned, take the schema size from the first row.
// (if it is wrong, it will be revealed later)
if ( !m_iSchemaSz )
m_iSchemaSz = m_dInsertValues.GetLength();
m_iRowsAffected++;
return m_dInsertValues.GetLength()==m_iRowsAffected*m_iSchemaSz;
}
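// Worked example of the integrity check: for
//   INSERT INTO t (id, title, body) VALUES (...), (...)
// m_iSchemaSz is 3, and after the second row m_iRowsAffected is 2, so the
// statement is consistent iff m_dInsertValues holds exactly 2*3 = 6 values.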
//////////////////////////////////////////////////////////////////////////
SqlParserTraits_c::SqlParserTraits_c ( CSphVector<SqlStmt_t> & dStmt, const char* szQuery, CSphString* pError )
: m_pBuf ( szQuery )
, m_pParseError ( pError )
, m_dStmt ( dStmt )
{}
void SqlParserTraits_c::PushQuery()
{
assert ( m_dStmt.GetLength() || ( !m_pQuery && !m_pStmt ) );
// add new
m_dStmt.Add ();
m_pStmt = &m_dStmt.Last();
}
CSphString & SqlParserTraits_c::ToString ( CSphString & sRes, const SqlNode_t & tNode ) const
{
if ( tNode.m_iType>=0 )
sRes.SetBinary ( m_pBuf + tNode.m_iStart, tNode.m_iEnd - tNode.m_iStart );
else switch ( tNode.m_iType )
{
case SPHINXQL_TOK_COUNT: sRes = "@count"; break;
case SPHINXQL_TOK_GROUPBY: sRes = "@groupby"; break;
case SPHINXQL_TOK_WEIGHT: sRes = "@weight"; break;
default: assert ( 0 && "internal error: unknown parser ident code" );
}
return sRes;
}
CSphString SqlParserTraits_c::ToStringUnescape ( const SqlNode_t & tNode ) const
{
assert ( tNode.m_iType>=0 );
return SqlUnescape ( m_pBuf + tNode.m_iStart, tNode.m_iEnd - tNode.m_iStart );
}
void SqlParserTraits_c::ProcessParsingError ( const char* szMessage )
{
// the 'wrong parser' heuristic is quite empirical: we fire it when the parser sees a syntax error at the very beginning of the buffer
// notice: szMessage here is NOT prefixed with "PXX:"
if ( ( m_pBuf == m_pLastTokenStart ) && ( strncmp ( szMessage, "syntax error", 12 ) == 0 ) )
m_bWrongParserSyntaxError = true;
m_pParseError->SetSprintf ( "%s %s near '%s'", m_sErrorHeader.cstr(), szMessage, m_pLastTokenStart ? m_pLastTokenStart : "(null)" );
// fixup TOK_xxx thingies
char* s = const_cast<char*> ( m_pParseError->cstr() );
char* d = s;
while ( *s )
{
if ( strncmp ( s, "TOK_", 4 ) == 0 )
s += 4;
else
*d++ = *s++;
}
*d = '\0';
}
bool SqlParserTraits_c::IsWrongSyntaxError() const noexcept
{
return m_bWrongParserSyntaxError;
}
void SqlParserTraits_c::DefaultOk ( std::initializer_list<const char*> sList )
{
for ( const char* sElem : sList )
m_pStmt->m_dInsertSchema.Add ( sElem );
m_pStmt->m_eStmt = STMT_DUMMY;
}
void SqlParserTraits_c::SetIndex ( const SqlNode_t& tNode ) const
{
ToString ( m_pStmt->m_sIndex, tNode );
// unquote index name
if ( ( tNode.m_iEnd - tNode.m_iStart ) > 2 && m_pStmt->m_sIndex.cstr()[0] == '\'' && m_pStmt->m_sIndex.cstr()[tNode.m_iEnd - tNode.m_iStart - 1] == '\'' )
m_pStmt->m_sIndex = m_pStmt->m_sIndex.SubString ( 1, m_pStmt->m_sIndex.Length() - 2 );
}
void SqlParserTraits_c::SetIndex ( const CSphString& sIndex ) const
{
auto iLen = sIndex.Length();
if ( iLen > 2 && sIndex.cstr()[0] == '\'' && sIndex.cstr()[iLen-1] == '\'' )
m_pStmt->m_sIndex = sIndex.SubString ( 1, iLen - 2 );
else
m_pStmt->m_sIndex = sIndex;
}
void SqlParserTraits_c::Comment ( const SqlNode_t& tNode ) const
{
}
//////////////////////////////////////////////////////////////////////////
enum class Option_e : BYTE;
class SqlParser_c : public SqlParserTraits_c
{
public:
ESphCollation m_eCollation;
CSphVector<FilterTreeItem_t> m_dFilterTree;
CSphVector<int> m_dFiltersPerStmt;
bool m_bGotFilterOr = false;
public:
SqlParser_c ( CSphVector<SqlStmt_t> & dStmt, ESphCollation eCollation, const char* szQuery, CSphString* pError );
void PushQuery ();
void AddIndexHint ( SecondaryIndexType_e eType, bool bForce, const SqlNode_t & tValue );
void AddItem ( SqlNode_t * pExpr, ESphAggrFunc eFunc=SPH_AGGR_NONE, SqlNode_t * pStart=NULL, SqlNode_t * pEnd=NULL );
bool AddItem ( const char * pToken, SqlNode_t * pStart=NULL, SqlNode_t * pEnd=NULL );
bool AddCount ();
void AliasLastItem ( SqlNode_t * pAlias );
void AddInsval ( CSphVector<SqlInsert_t> & dVec, const SqlNode_t & tNode );
/// called on transition from an outer select to inner select
void ResetSelect();
/// called every time we capture a select list item
/// (i think there should be a simpler way to track these though)
void SetSelect ( SqlNode_t * pStart, SqlNode_t * pEnd=NULL );
bool AddSchemaItem ( SqlNode_t * pNode );
bool SetMatch ( const SqlNode_t & tValue );
bool AddMatch ( const SqlNode_t & tValue, const SqlNode_t & tIndex );
bool SetKNN ( const SqlNode_t & tAttr, const SqlNode_t & tK, const SqlNode_t & tValues, const SqlNode_t * pEf );
void AddConst ( int iList, const SqlNode_t& tValue );
void SetLocalStatement ( const SqlNode_t & tName );
bool AddFloatRangeFilter ( const SqlNode_t & tAttr, float fMin, float fMax, bool bHasEqual, bool bExclude=false );
bool AddFloatFilterGreater ( const SqlNode_t & tAttr, float fVal, bool bHasEqual );
bool AddFloatFilterLesser ( const SqlNode_t & tAttr, float fVal, bool bHasEqual );
bool AddIntRangeFilter ( const SqlNode_t & tAttr, int64_t iMin, int64_t iMax, bool bExclude );
bool AddIntFilterGreater ( const SqlNode_t & tAttr, int64_t iVal, bool bHasEqual );
bool AddIntFilterLesser ( const SqlNode_t & tAttr, int64_t iVal, bool bHasEqual );
bool AddUservarFilter ( const SqlNode_t & tCol, const SqlNode_t & tVar, bool bExclude );
void AddGroupBy ( const SqlNode_t & tGroupBy );
CSphFilterSettings * AddFilter ( const SqlNode_t & tCol, ESphFilter eType );
CSphFilterSettings * AddFilter ( const SqlNode_t & tCol, ESphFilter eType, const RefcountedVector_c<AttrValue_t> & dValues );
bool AddStringFilter ( const SqlNode_t & tCol, const SqlNode_t & tVal, bool bExclude );
bool AddStringCmpFilter ( const SqlNode_t & tCol, const SqlNode_t & tVal, bool bExclude, EStrCmpDir eStrCmpDir );
CSphFilterSettings * AddValuesFilter ( const SqlNode_t & tCol ) { return AddFilter ( tCol, SPH_FILTER_VALUES ); }
CSphFilterSettings * AddValuesFilter ( const SqlNode_t & tCol, const RefcountedVector_c<AttrValue_t> & dValues );
bool AddStringListFilter ( const SqlNode_t & tCol, SqlNode_t & tVal, StrList_e eType, bool bInverse=false );
bool AddNullFilter ( const SqlNode_t & tCol, bool bEqualsNull );
void AddHaving ();
bool SetJoin ( const SqlNode_t & tIdx );
void SetJoinType ( JoinType_e eType );
bool AddOnFilter ( const SqlNode_t & tIdx1, const SqlNode_t & tAttr1, const SqlNode_t & tIdx2, const SqlNode_t & tAttr2, int iTypeCast );
void SetJoinOnCast ( ESphAttr eType ) { m_eJoinTypeCast = eType; }
bool AddDistinct ( SqlNode_t * pNewExpr, SqlNode_t * pStart, SqlNode_t * pEnd );
void AddDistinct ( SqlNode_t * pNewExpr );
bool AddDistinctSort ( SqlNode_t * pNewExpr, SqlNode_t * pStart, SqlNode_t * pEnd, bool bSortAsc );
bool MaybeAddFacetDistinct();
bool SetupFacetStmt();
void FilterGroup ( SqlNode_t & tNode, SqlNode_t & tExpr );
void FilterOr ( SqlNode_t & tNode, const SqlNode_t & tLeft, const SqlNode_t & tRight );
void FilterAnd ( SqlNode_t & tNode, const SqlNode_t & tLeft, const SqlNode_t & tRight );
void SetOp ( SqlNode_t & tNode );
bool SetOldSyntax();
bool SetNewSyntax();
bool IsGoodSyntax();
bool IsDeprecatedSyntax() const;
int AllocNamedVec ();
CSphVector<CSphNamedInt> & GetNamedVec ( int iIndex );
void FreeNamedVec ( int iIndex );
void GenericStatement ( SqlNode_t * pNode );
void SwapSubkeys();
void AddUpdatedAttr ( const SqlNode_t & tName, ESphAttr eType ) const;
void UpdateMVAAttr ( const SqlNode_t & tName, const SqlNode_t& dValues );
void UpdateStringAttr ( const SqlNode_t & tCol, const SqlNode_t & tStr );
void SetGroupbyLimit ( int iLimit );
void SetLimit ( int iOffset, int iLimit );
float ToFloat ( const SqlNode_t & tNode ) const;
int64_t DotGetInt ( const SqlNode_t & tNode ) const;
void AddStringSubkey ( const SqlNode_t & tNode ) const;
void AddIntSubkey ( const SqlNode_t & tNode ) const;
void AddDotIntSubkey ( const SqlNode_t & tNode ) const;
void AddComment ( const SqlNode_t* tNode );
private:
bool m_bMatchClause = false;
bool m_bJoinMatchClause = false;
BYTE m_uSyntaxFlags = 0;
bool m_bNamedVecBusy = false;
CSphVector<CSphNamedInt> m_dNamedVec;
ESphAttr m_eJoinTypeCast = SPH_ATTR_NONE;
void AutoAlias ( CSphQueryItem & tItem, SqlNode_t * pStart, SqlNode_t * pEnd );
bool CheckOption ( Option_e eOption ) const override;
SqlStmt_e GetSecondaryStmt () const;
};
#define YYSTYPE SqlNode_t
// unused parameter, simply to avoid a type clash between all the yylex() functions
#define YY_DECL static int my_lex ( YYSTYPE * lvalp, void * yyscanner, SqlParser_c * pParser )
#if _WIN32
#define YY_NO_UNISTD_H 1
#endif
#include "flexsphinxql.c"
static void yyerror ( SqlParserTraits_c * pParser, const char * sMessage )
{
// flex puts a zero at the last token boundary; undo that here
yylex_unhold ( pParser->m_pScanner );
pParser->ProcessParsingError(sMessage);
}
#ifndef NDEBUG
// use a proxy so it is possible to set a breakpoint inside yylex
static int yylex ( YYSTYPE * lvalp, SqlParser_c * pParser )
{
int res = my_lex ( lvalp, pParser->m_pScanner, pParser );
return res;
}
#else
static int yylex ( YYSTYPE * lvalp, SqlParser_c * pParser )
{
return my_lex ( lvalp, pParser->m_pScanner, pParser );
}
#endif
#include "bissphinxql.c"
//////////////////////////////////////////////////////////////////////////
SqlParser_c::SqlParser_c ( CSphVector<SqlStmt_t> & dStmt, ESphCollation eCollation, const char* szQuery, CSphString* pError )
: SqlParserTraits_c ( dStmt, szQuery, pError )
, m_eCollation ( eCollation )
{
assert ( m_dStmt.IsEmpty() );
PushQuery ();
m_sErrorHeader = "P01:";
}
void SqlParser_c::PushQuery ()
{
assert ( m_dStmt.GetLength() || ( !m_pQuery && !m_pStmt ) );
// post set proper result-set order
if ( m_dStmt.GetLength() && m_pQuery )
{
if ( m_pQuery->m_sGroupBy.IsEmpty() )
m_pQuery->m_sSortBy = m_pQuery->m_sOrderBy;
else
m_pQuery->m_sGroupSortBy = m_pQuery->m_sOrderBy;
m_dFiltersPerStmt.Add ( m_dFilterTree.GetLength() );
}
SqlParserTraits_c::PushQuery();
m_pQuery = &m_pStmt->m_tQuery;
m_pQuery->m_eCollation = m_eCollation;
m_bMatchClause = false;
}
static bool CheckInteger ( const CSphString & sOpt, const CSphString & sVal, CSphString & sError )
{
const char * p = sVal.cstr();
while ( sphIsInteger ( *p ) ) // note: must not advance twice per char, or we can scan past the terminator
p++;
if ( *p )
{
sError.SetSprintf ( "%s value should be a number: '%s'", sOpt.cstr(), sVal.cstr() );
return false;
}
return true;
}
bool SqlParserTraits_c::CheckInteger ( const CSphString & sOpt, const CSphString & sVal ) const
{
return ::CheckInteger ( sOpt, sVal, *m_pParseError );
}
float SqlParser_c::ToFloat ( const SqlNode_t & tNode ) const
{
return (float) strtod ( m_pBuf+tNode.m_iStart, nullptr );
}
int64_t SqlParser_c::DotGetInt ( const SqlNode_t & tNode ) const
{
return (int64_t) strtoull ( m_pBuf+tNode.m_iStart+1, nullptr, 10 );
}
void SqlParser_c::AddStringSubkey ( const SqlNode_t & tNode ) const
{
auto& sKey = m_pStmt->m_dStringSubkeys.Add();
ToString ( sKey, tNode );
}
void SqlParser_c::AddIntSubkey ( const SqlNode_t & tNode ) const
{
m_pStmt->m_dIntSubkeys.Add ( tNode.GetValueInt() );
}
void SqlParser_c::AddDotIntSubkey ( const SqlNode_t & tNode ) const
{
m_pStmt->m_dIntSubkeys.Add ( DotGetInt ( tNode ) );
}
/// hashes for all options
enum class Option_e : BYTE
{
AGENT_QUERY_TIMEOUT = 0,
BOOLEAN_SIMPLIFY,
COLUMNS,
COMMENT,
CUTOFF,
DEBUG_NO_PAYLOAD, // fixme! document
EXPAND_KEYWORDS,
FIELD_WEIGHTS,
FORMAT,
GLOBAL_IDF,
IDF,
IGNORE_NONEXISTENT_COLUMNS,
IGNORE_NONEXISTENT_INDEXES, // fixme! document!
INDEX_WEIGHTS,
LOCAL_DF,
LOW_PRIORITY,
MAX_MATCHES,
MAX_PREDICTED_TIME,
MAX_QUERY_TIME,
MORPHOLOGY,
RAND_SEED,
RANKER,
RETRY_COUNT,
RETRY_DELAY,
REVERSE_SCAN,
SORT_METHOD,
STRICT_, // trailing underscore avoids a name clash on Windows
SYNC,
THREADS,
TOKEN_FILTER,
TOKEN_FILTER_OPTIONS,
NOT_ONLY_ALLOWED,
STORE,
ACCURATE_AGG,
MAXMATCH_THRESH,
DISTINCT_THRESH,
THREADS_EX,
SWITCHOVER,
EXPANSION_LIMIT,
JIEBA_MODE,
INVALID_OPTION
};
static SmallStringHash_T<Option_e, (BYTE) Option_e::INVALID_OPTION * 2> g_hParseOption;
void InitParserOption()
{
const char * dOptions[(BYTE) Option_e::INVALID_OPTION] = { "agent_query_timeout", "boolean_simplify",
"columns", "comment", "cutoff", "debug_no_payload", "expand_keywords", "field_weights", "format", "global_idf",
"idf", "ignore_nonexistent_columns", "ignore_nonexistent_indexes", "index_weights", "local_df", "low_priority",
"max_matches", "max_predicted_time", "max_query_time", "morphology", "rand_seed", "ranker", "retry_count",
"retry_delay", "reverse_scan", "sort_method", "strict", "sync", "threads", "token_filter", "token_filter_options",
"not_terms_only_allowed", "store", "accurate_aggregation", "max_matches_increase_threshold", "distinct_precision_threshold",
"threads_ex", "switchover", "expansion_limit", "jieba_mode" };
for ( BYTE i = 0u; i<(BYTE) Option_e::INVALID_OPTION; ++i )
g_hParseOption.Add ( (Option_e) i, dOptions[i] );
}
static Option_e ParseOption ( const CSphString& sOpt )
{
auto * pCol = g_hParseOption ( sOpt );
return ( pCol ? *pCol : Option_e::INVALID_OPTION );
}
static bool CheckOption ( SqlStmt_e eStmt, Option_e eOption )
{
// trick! the following vectors must be sorted, as BinarySearch is used to determine the presence of a value.
static Option_e dDeleteOptions[] = { Option_e::STORE };
static Option_e dUpdateOptions[] = { Option_e::AGENT_QUERY_TIMEOUT, Option_e::BOOLEAN_SIMPLIFY, Option_e::COMMENT,
Option_e::CUTOFF, Option_e::DEBUG_NO_PAYLOAD, Option_e::EXPAND_KEYWORDS, Option_e::FIELD_WEIGHTS,
Option_e::GLOBAL_IDF, Option_e::IDF, Option_e::IGNORE_NONEXISTENT_COLUMNS,
Option_e::IGNORE_NONEXISTENT_INDEXES, Option_e::INDEX_WEIGHTS, Option_e::LOCAL_DF, Option_e::LOW_PRIORITY,
Option_e::MAX_MATCHES, Option_e::MAX_PREDICTED_TIME, Option_e::MAX_QUERY_TIME, Option_e::MORPHOLOGY,
Option_e::RAND_SEED, Option_e::RANKER, Option_e::RETRY_COUNT, Option_e::RETRY_DELAY, Option_e::REVERSE_SCAN,
Option_e::SORT_METHOD, Option_e::STRICT_, Option_e::THREADS, Option_e::TOKEN_FILTER,
Option_e::NOT_ONLY_ALLOWED };
static Option_e dSelectOptions[] = { Option_e::AGENT_QUERY_TIMEOUT, Option_e::BOOLEAN_SIMPLIFY, Option_e::COLUMNS, Option_e::COMMENT,
Option_e::CUTOFF, Option_e::DEBUG_NO_PAYLOAD, Option_e::EXPAND_KEYWORDS, Option_e::FIELD_WEIGHTS, Option_e::FORMAT,
Option_e::GLOBAL_IDF, Option_e::IDF, Option_e::IGNORE_NONEXISTENT_INDEXES, Option_e::INDEX_WEIGHTS,
Option_e::LOCAL_DF, Option_e::LOW_PRIORITY, Option_e::MAX_MATCHES, Option_e::MAX_PREDICTED_TIME,
Option_e::MAX_QUERY_TIME, Option_e::MORPHOLOGY, Option_e::RAND_SEED, Option_e::RANKER,
Option_e::RETRY_COUNT, Option_e::RETRY_DELAY, Option_e::REVERSE_SCAN, Option_e::SORT_METHOD,
Option_e::THREADS, Option_e::TOKEN_FILTER, Option_e::NOT_ONLY_ALLOWED, Option_e::ACCURATE_AGG,
Option_e::MAXMATCH_THRESH, Option_e::DISTINCT_THRESH, Option_e::THREADS_EX, Option_e::EXPANSION_LIMIT,
Option_e::JIEBA_MODE };
static Option_e dInsertOptions[] = { Option_e::TOKEN_FILTER_OPTIONS };
static Option_e dOptimizeOptions[] = { Option_e::CUTOFF, Option_e::SYNC };
static Option_e dShowOptions[] = { Option_e::COLUMNS, Option_e::FORMAT };
static Option_e dReloadOptions[] = { Option_e::SWITCHOVER };
#define CHKOPT( _set, _val ) VecTraits_T<Option_e> (_set, sizeof(_set)).BinarySearch (_val)!=nullptr
switch ( eStmt )
{
case STMT_DELETE:
return CHKOPT( dDeleteOptions, eOption );
case STMT_UPDATE:
return CHKOPT( dUpdateOptions, eOption );
case STMT_SELECT:
return CHKOPT( dSelectOptions, eOption );
case STMT_INSERT:
case STMT_REPLACE:
return CHKOPT( dInsertOptions, eOption );
case STMT_OPTIMIZE_INDEX:
return CHKOPT( dOptimizeOptions, eOption );
case STMT_RELOAD_INDEX:
return CHKOPT( dReloadOptions, eOption );
case STMT_EXPLAIN:
case STMT_SHOW_PLAN:
case STMT_SHOW_THREADS:
return CHKOPT( dShowOptions, eOption );
default:
return false;
}
#undef CHKOPT
}
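// Membership sketch: each per-statement whitelist above is kept sorted by
// enum value so CHKOPT can binary-search it. E.g.
// CheckOption ( STMT_DELETE, Option_e::STORE ) finds STORE in dDeleteOptions
// and returns true, while CheckOption ( STMT_DELETE, Option_e::RANKER )
// returns false.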
// if the query is special, like 'select .. from @@system.threads', it can also accept options for 'show threads',
// so provide that stmt for extended validation of the option in this case.
SqlStmt_e SqlParser_c::GetSecondaryStmt () const
{
if ( m_pQuery->m_dStringSubkeys.any_of ([] (const CSphString& s) { return s==".threads"; }))
return STMT_SHOW_THREADS;
return STMT_PARSE_ERROR;
}
bool SqlParserTraits_c::CheckOption ( Option_e eOption ) const
{
assert ( m_pStmt );
return ::CheckOption ( m_pStmt->m_eStmt, eOption );
}
bool SqlParser_c::CheckOption ( Option_e eOption ) const
{
assert ( m_pStmt );
auto bRes = ::CheckOption ( m_pStmt->m_eStmt, eOption );
if ( bRes )
return true;
if ( m_pStmt->m_eStmt != STMT_SELECT )
return false;
return ::CheckOption ( GetSecondaryStmt(), eOption );
}
static auto fnFailer ( CSphString& sError )
{
return [&sError] ( const char* sTemplate, ... ) {
va_list ap;
va_start ( ap, sTemplate );
sError.SetSprintfVa ( sTemplate, ap );
va_end ( ap );
return AddOption_e::FAILED;
};
}
#ifdef FAILED
#undef FAILED
#endif
AddOption_e AddOption ( CSphQuery & tQuery, const CSphString & sOpt, const CSphString & sValue, int64_t iValue, SqlStmt_e eStmt, CSphString & sError )
{
auto FAILED = fnFailer (sError);
auto eOpt = ParseOption ( sOpt );
if ( !CheckOption ( eStmt, eOpt ) )
return FAILED ( "unknown option '%s'", sOpt.cstr () );
const Option_e dIntegerOptions[] =
{
Option_e::MAX_MATCHES, Option_e::CUTOFF, Option_e::MAX_QUERY_TIME, Option_e::RETRY_COUNT,
Option_e::RETRY_DELAY, Option_e::IGNORE_NONEXISTENT_COLUMNS, Option_e::AGENT_QUERY_TIMEOUT, Option_e::MAX_PREDICTED_TIME,
Option_e::BOOLEAN_SIMPLIFY, Option_e::GLOBAL_IDF, Option_e::LOCAL_DF, Option_e::IGNORE_NONEXISTENT_INDEXES,
Option_e::STRICT_, Option_e::COLUMNS, Option_e::RAND_SEED, Option_e::SYNC, Option_e::EXPAND_KEYWORDS,
Option_e::THREADS, Option_e::NOT_ONLY_ALLOWED, Option_e::LOW_PRIORITY, Option_e::DEBUG_NO_PAYLOAD,
Option_e::ACCURATE_AGG, Option_e::MAXMATCH_THRESH, Option_e::DISTINCT_THRESH, Option_e::SWITCHOVER,
Option_e::EXPANSION_LIMIT
};
bool bFound = ::any_of ( dIntegerOptions, [eOpt] ( auto i ) { return i == eOpt; } );
if ( !bFound )
return AddOption_e::NOT_FOUND;
if ( sValue.cstr() && !CheckInteger ( sOpt, sValue, sError ) )
return AddOption_e::FAILED;
switch ( eOpt )
{
case Option_e::MAX_MATCHES: // else if ( sOpt=="max_matches" )
tQuery.m_iMaxMatches = (int)iValue;
tQuery.m_bExplicitMaxMatches = true;
break;
case Option_e::DEBUG_NO_PAYLOAD:
if ( iValue )
tQuery.m_uDebugFlags |= QUERY_DEBUG_NO_PAYLOAD;
else
tQuery.m_uDebugFlags &= ~QUERY_DEBUG_NO_PAYLOAD;
break;
case Option_e::CUTOFF: tQuery.m_iCutoff = (int)iValue; break;
case Option_e::MAX_QUERY_TIME: tQuery.m_uMaxQueryMsec = (int)iValue; break;
case Option_e::RETRY_COUNT: tQuery.m_iRetryCount = (int)iValue; break;
case Option_e::RETRY_DELAY: tQuery.m_iRetryDelay = (int)iValue; break;
case Option_e::IGNORE_NONEXISTENT_COLUMNS: tQuery.m_bIgnoreNonexistent = iValue!=0; break;
case Option_e::AGENT_QUERY_TIMEOUT: tQuery.m_iAgentQueryTimeoutMs = (int)iValue; break;
case Option_e::MAX_PREDICTED_TIME: tQuery.m_iMaxPredictedMsec = int ( iValue > INT_MAX ? INT_MAX : iValue ); break;
case Option_e::BOOLEAN_SIMPLIFY: tQuery.m_bSimplify = iValue!=0; break;
case Option_e::GLOBAL_IDF: tQuery.m_bGlobalIDF = iValue!=0; break;
case Option_e::LOCAL_DF: tQuery.m_bLocalDF = iValue!=0; break;
case Option_e::IGNORE_NONEXISTENT_INDEXES: tQuery.m_bIgnoreNonexistentIndexes = iValue!=0; break;
case Option_e::STRICT_: tQuery.m_bStrict = iValue!=0; break;
case Option_e::SYNC: tQuery.m_bSync = iValue!=0; break;
case Option_e::EXPAND_KEYWORDS: tQuery.m_eExpandKeywords = ( iValue!=0 ? QUERY_OPT_ENABLED : QUERY_OPT_DISABLED ); break;
case Option_e::THREADS: tQuery.m_iConcurrency = (int)iValue; break;
case Option_e::NOT_ONLY_ALLOWED: tQuery.m_bNotOnlyAllowed = iValue!=0; break;
case Option_e::RAND_SEED: tQuery.m_iRandSeed = int64_t(DWORD(iValue)); break;
case Option_e::LOW_PRIORITY: tQuery.m_bLowPriority = iValue!=0; break;
case Option_e::ACCURATE_AGG: tQuery.m_bAccurateAggregation = iValue!=0; tQuery.m_bExplicitAccurateAggregation = true; break;
case Option_e::MAXMATCH_THRESH: tQuery.m_iMaxMatchThresh = iValue; break;
case Option_e::DISTINCT_THRESH: tQuery.m_iDistinctThresh = iValue; tQuery.m_bExplicitDistinctThresh = true; break;
case Option_e::THREADS_EX: tQuery.m_iConcurrency = (int)iValue; break;
case Option_e::EXPANSION_LIMIT: tQuery.m_iExpansionLimit = (int)iValue; break;
default:
return AddOption_e::NOT_FOUND;
}
return AddOption_e::ADDED;
}
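// illustrative: 'SELECT ... OPTION max_matches=5000, cutoff=100' routes both names through the
// integer branch above; max_matches also raises m_bExplicitMaxMatches, marking the value as
// user-provided rather than a default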
AddOption_e AddOption ( CSphQuery & tQuery, const CSphString & sOpt, const CSphString & sVal, const std::function<CSphString ()> & fnGetUnescaped, SqlStmt_e eStmt, CSphString & sError )
{
auto FAILED = fnFailer ( sError );
auto eOpt = ParseOption ( sOpt );
if ( !::CheckOption ( eStmt, eOpt ) )
return FAILED ( "unknown option '%s'", sOpt.cstr () );
// OPTIMIZE? hash possible sOpt choices?
switch ( eOpt )
{
case Option_e::RANKER:
tQuery.m_eRanker = SPH_RANK_TOTAL;
for ( int iRanker = SPH_RANK_PROXIMITY_BM25; iRanker<=SPH_RANK_SPH04; iRanker++ )
if ( sVal==sphGetRankerName ( ESphRankMode ( iRanker ) ) )
{
tQuery.m_eRanker = ESphRankMode ( iRanker );
break;
}
if ( tQuery.m_eRanker==SPH_RANK_TOTAL )
{
if ( sVal==sphGetRankerName ( SPH_RANK_EXPR ) || sVal==sphGetRankerName ( SPH_RANK_EXPORT ) )
return FAILED ( "missing ranker expression (use OPTION ranker=expr('1+2') for example)" );
else if ( sphPluginExists ( PLUGIN_RANKER, sVal.cstr() ) )
{
tQuery.m_eRanker = SPH_RANK_PLUGIN;
tQuery.m_sUDRanker = sVal;
}
else // neither a builtin nor a registered plugin ranker
return FAILED ( "unknown ranker '%s'", sVal.cstr() );
}
break;
case Option_e::TOKEN_FILTER: // tokfilter = hello.dll:hello:some_opts
{
StrVec_t dParams;
if ( !sphPluginParseSpec ( sVal, dParams, sError ) )
return AddOption_e::FAILED;
if ( !dParams.GetLength() )
return FAILED ( "missing token filter spec string" );
tQuery.m_sQueryTokenFilterLib = dParams[0];
tQuery.m_sQueryTokenFilterName = dParams[1];
tQuery.m_sQueryTokenFilterOpts = dParams[2];
}
break;
case Option_e::REVERSE_SCAN: //} else if ( sOpt=="reverse_scan" )
return FAILED ( "reverse_scan is deprecated" );
case Option_e::COMMENT: //} else if ( sOpt=="comment" )
tQuery.m_sComment = fnGetUnescaped();
break;
case Option_e::SORT_METHOD: //} else if ( sOpt=="sort_method" )
if ( sVal=="pq" ) tQuery.m_bSortKbuffer = false;
else if ( sVal=="kbuffer" ) tQuery.m_bSortKbuffer = true;
else
return FAILED ( "unknown sort_method=%s (known values are pq, kbuffer)", sVal.cstr() );
break;
case Option_e::IDF: //} else if ( sOpt=="idf" )
{
StrVec_t dOpts;
sphSplit ( dOpts, sVal.cstr() );
ARRAY_FOREACH ( i, dOpts )
{
if ( dOpts[i]=="normalized" )
tQuery.m_bPlainIDF = false;
else if ( dOpts[i]=="plain" )
tQuery.m_bPlainIDF = true;
else if ( dOpts[i]=="tfidf_normalized" )
tQuery.m_bNormalizedTFIDF = true;
else if ( dOpts[i]=="tfidf_unnormalized" )
tQuery.m_bNormalizedTFIDF = false;
else
return FAILED ( "unknown flag %s in idf=%s (known values are plain, normalized, tfidf_normalized, tfidf_unnormalized)", dOpts[i].cstr(), sVal.cstr() );
}
}
break;
case Option_e::MORPHOLOGY: //} else if ( sOpt=="morphology" )
if ( sVal=="none" )
tQuery.m_eExpandKeywords = QUERY_OPT_MORPH_NONE;
else
return FAILED ( "morphology could be only disabled with option none, got %s", sVal.cstr() );
break;
case Option_e::STORE: //} else if ( sOpt=="store" )
tQuery.m_sStore = sVal;
break;
case Option_e::THREADS_EX:
std::tie ( tQuery.m_tMainDispatcher, tQuery.m_tPseudoShardingDispatcher ) = Dispatcher::ParseTemplates ( sVal.cstr() );
break;
case Option_e::JIEBA_MODE:
if ( !StrToJiebaMode ( tQuery.m_eJiebaMode, sVal, sError ) )
return FAILED(sError.cstr());
break;
default:
return AddOption_e::NOT_FOUND;
}
return AddOption_e::ADDED;
}
AddOption_e AddOption ( CSphQuery & tQuery, const CSphString & sOpt, CSphVector<CSphNamedInt> & dNamed, SqlStmt_e eStmt, CSphString & sError )
{
auto FAILED = fnFailer ( sError );
auto eOpt = ParseOption ( sOpt );
if ( !::CheckOption ( eStmt, eOpt ) )
return FAILED ( "unknown option '%s'", sOpt.cstr () );
switch ( eOpt )
{
case Option_e::FIELD_WEIGHTS: tQuery.m_dFieldWeights.SwapData ( dNamed ); break;
case Option_e::INDEX_WEIGHTS: tQuery.m_dIndexWeights.SwapData ( dNamed ); break;
default:
return AddOption_e::NOT_FOUND;
}
return AddOption_e::ADDED;
}
AddOption_e AddOptionRanker ( CSphQuery & tQuery, const CSphString & sOpt, const CSphString & sVal, const std::function<CSphString ()> & fnGetUnescaped, SqlStmt_e eStmt, CSphString & sError )
{
auto FAILED = fnFailer ( sError );
auto eOpt = ParseOption ( sOpt );
if ( !::CheckOption ( eStmt, eOpt ) )
return FAILED ( "unknown option '%s'", sOpt.cstr () );
if ( eOpt==Option_e::RANKER )
{
if ( sVal=="expr" || sVal=="export" )
{
tQuery.m_eRanker = sVal=="expr" ? SPH_RANK_EXPR : SPH_RANK_EXPORT;
tQuery.m_sRankerExpr = fnGetUnescaped();
return AddOption_e::ADDED;
} else if ( sphPluginExists ( PLUGIN_RANKER, sVal.cstr() ) )
{
tQuery.m_eRanker = SPH_RANK_PLUGIN;
tQuery.m_sUDRanker = sVal;
tQuery.m_sUDRankerOpts = fnGetUnescaped();
return AddOption_e::ADDED;
}
}
return AddOption_e::NOT_FOUND;
}
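// illustrative: OPTION ranker=expr('sum(lcs)*1000+bm25') lands here with sVal='expr' and the
// quoted expression delivered lazily via fnGetUnescaped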
bool SqlParserTraits_c::AddOption ( const SqlNode_t & tIdent, const SqlNode_t & tValue )
{
CSphString sOpt, sVal;
ToString ( sOpt, tIdent ).ToLower();
ToString ( sVal, tValue ).ToLower().Unquote();
auto eOpt = ParseOption ( sOpt );
if ( !CheckOption ( eOpt ) )
{
m_pParseError->SetSprintf ( "unknown option '%s'", sOpt.cstr () );
return false;
}
AddOption_e eAddRes = ::AddOption ( *m_pQuery, sOpt, sVal, [this,tValue]{ return ToStringUnescape(tValue); }, m_pStmt->m_eStmt, *m_pParseError );
if ( eAddRes==AddOption_e::FAILED )
return false;
else if ( eAddRes==AddOption_e::ADDED )
return true;
eAddRes = ::AddOption ( *m_pQuery, sOpt, sVal, tValue.GetValueInt(), m_pStmt->m_eStmt, *m_pParseError );
if ( eAddRes==AddOption_e::FAILED )
return false;
else if ( eAddRes==AddOption_e::ADDED )
return true;
// OPTIMIZE? hash possible sOpt choices?
switch ( eOpt )
{
case Option_e::COLUMNS: //} else if ( sOpt=="columns" ) // for SHOW THREADS
if ( !CheckInteger ( sOpt, sVal ) )
return false;
m_pStmt->m_iThreadsCols = Max ( (int)tValue.GetValueInt(), 0 );
break;
case Option_e::FORMAT: //} else if ( sOpt=="format" ) // for SHOW THREADS
m_pStmt->m_sThreadFormat = sVal;
break;
case Option_e::TOKEN_FILTER_OPTIONS: //} else if ( sOpt=="token_filter_options" )
m_pStmt->m_sStringParam = sVal;
break;
case Option_e::SWITCHOVER:
m_pStmt->m_iIntParam = tValue.GetValueInt() ? 1 : 0;
break;
default: //} else
m_pParseError->SetSprintf ( "unknown option '%s' (or bad argument type)", sOpt.cstr() );
return false;
}
return true;
}
bool SqlParserTraits_c::AddOption ( const SqlNode_t & tIdent, const SqlNode_t & tValue, const SqlNode_t & tArg )
{
CSphString sOpt, sVal;
ToString ( sOpt, tIdent ).ToLower();
ToString ( sVal, tValue ).ToLower().Unquote();
auto eOpt = ParseOption ( sOpt );
if ( !CheckOption ( eOpt ) )
{
m_pParseError->SetSprintf ( "unknown option '%s'", sOpt.cstr () );
return false;
}
AddOption_e eAdd = ::AddOptionRanker ( *m_pQuery, sOpt, sVal, [this,tArg]{ return ToStringUnescape(tArg); }, m_pStmt->m_eStmt, *m_pParseError );
if ( eAdd==AddOption_e::NOT_FOUND )
m_pParseError->SetSprintf ( "unknown option '%s' (or bad argument type)", sOpt.cstr() );
return eAdd==AddOption_e::ADDED;
}
bool SqlParserTraits_c::AddOption ( const SqlNode_t & tIdent, CSphVector<CSphNamedInt> & dNamed )
{
CSphString sOpt;
ToString ( sOpt, tIdent ).ToLower ();
auto eOpt = ParseOption ( sOpt );
if ( !CheckOption ( eOpt ) )
{
m_pParseError->SetSprintf ( "unknown option '%s'", sOpt.cstr () );
return false;
}
AddOption_e eAdd = ::AddOption ( *m_pQuery, sOpt, dNamed, m_pStmt->m_eStmt, *m_pParseError );
if ( eAdd==AddOption_e::NOT_FOUND )
m_pParseError->SetSprintf ( "unknown option '%s' (or bad argument type)", sOpt.cstr() );
return eAdd==AddOption_e::ADDED;
}
void SqlParser_c::AddIndexHint ( SecondaryIndexType_e eType, bool bForce, const SqlNode_t & tValue )
{
CSphString sIndexes;
ToString ( sIndexes, tValue );
StrVec_t dIndexes;
sphSplit ( dIndexes, sIndexes.cstr(), ", \t" );
for ( const auto & i : dIndexes )
{
IndexHint_t & tHint = m_pQuery->m_dIndexHints.Add();
tHint.m_sIndex = i;
tHint.m_eType = eType;
tHint.m_bForce = bForce;
}
}
void SqlParser_c::AliasLastItem ( SqlNode_t * pAlias )
{
if ( pAlias )
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Last();
tItem.m_sAlias.SetBinary ( m_pBuf + pAlias->m_iStart, pAlias->m_iEnd - pAlias->m_iStart );
tItem.m_sAlias.ToLower();
SetSelect ( pAlias );
}
}
void SqlParser_c::AddInsval ( CSphVector<SqlInsert_t> & dVec, const SqlNode_t & tNode )
{
SqlInsert_t & tIns = dVec.Add();
tIns.m_iType = tNode.m_iType;
tIns.CopyValueInt(tNode);
tIns.m_fVal = tNode.m_fValue;
if ( tIns.m_iType==TOK_QUOTED_STRING )
tIns.m_sVal = ToStringUnescape ( tNode );
tIns.m_pVals = tNode.m_pValues;
}
void SqlParser_c::ResetSelect()
{
if ( m_pQuery )
m_pQuery->m_iSQLSelectStart = m_pQuery->m_iSQLSelectEnd = -1;
}
void SqlParser_c::SetSelect ( SqlNode_t * pStart, SqlNode_t * pEnd )
{
if ( m_pQuery )
{
if ( pStart && ( m_pQuery->m_iSQLSelectStart<0 || m_pQuery->m_iSQLSelectStart>pStart->m_iStart ) )
m_pQuery->m_iSQLSelectStart = pStart->m_iStart;
if ( !pEnd )
pEnd = pStart;
if ( pEnd && ( m_pQuery->m_iSQLSelectEnd<0 || m_pQuery->m_iSQLSelectEnd<pEnd->m_iEnd ) )
m_pQuery->m_iSQLSelectEnd = pEnd->m_iEnd;
}
}
void SqlParser_c::AutoAlias ( CSphQueryItem & tItem, SqlNode_t * pStart, SqlNode_t * pEnd )
{
if ( pStart && pEnd )
{
tItem.m_sAlias.SetBinary ( m_pBuf + pStart->m_iStart, pEnd->m_iEnd - pStart->m_iStart );
sphColumnToLowercase ( const_cast<char *>( tItem.m_sAlias.cstr() ) );
} else
{
tItem.m_sAlias = tItem.m_sExpr;
}
SetSelect ( pStart, pEnd );
}
void SqlParser_c::AddItem ( SqlNode_t * pExpr, ESphAggrFunc eAggrFunc, SqlNode_t * pStart, SqlNode_t * pEnd )
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Add();
tItem.m_sExpr.SetBinary ( m_pBuf + pExpr->m_iStart, pExpr->m_iEnd - pExpr->m_iStart );
sphColumnToLowercase ( const_cast<char *>( tItem.m_sExpr.cstr() ) );
tItem.m_eAggrFunc = eAggrFunc;
AutoAlias ( tItem, pStart?pStart:pExpr, pEnd?pEnd:pExpr );
}
bool SqlParser_c::AddItem ( const char * pToken, SqlNode_t * pStart, SqlNode_t * pEnd )
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Add();
tItem.m_sExpr = pToken;
tItem.m_eAggrFunc = SPH_AGGR_NONE;
sphColumnToLowercase ( const_cast<char *>( tItem.m_sExpr.cstr() ) );
AutoAlias ( tItem, pStart, pEnd );
return SetNewSyntax();
}
bool SqlParser_c::AddCount ()
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Add();
tItem.m_sExpr = tItem.m_sAlias = "count(*)";
tItem.m_eAggrFunc = SPH_AGGR_NONE;
return SetNewSyntax();
}
void SqlParser_c::AddGroupBy ( const SqlNode_t & tGroupBy )
{
if ( m_pQuery->m_sGroupBy.IsEmpty() )
{
m_pQuery->m_eGroupFunc = SPH_GROUPBY_ATTR;
m_pQuery->m_sGroupBy.SetBinary ( m_pBuf + tGroupBy.m_iStart, tGroupBy.m_iEnd - tGroupBy.m_iStart );
sphColumnToLowercase ( const_cast<char *>( m_pQuery->m_sGroupBy.cstr() ) );
} else
{
m_pQuery->m_eGroupFunc = SPH_GROUPBY_MULTIPLE;
CSphString sTmp;
sTmp.SetBinary ( m_pBuf + tGroupBy.m_iStart, tGroupBy.m_iEnd - tGroupBy.m_iStart );
sphColumnToLowercase ( const_cast<char *>( sTmp.cstr() ) );
m_pQuery->m_sGroupBy.SetSprintf ( "%s, %s", m_pQuery->m_sGroupBy.cstr(), sTmp.cstr() );
}
}
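// e.g. GROUP BY a, b: the first call stores 'a' with SPH_GROUPBY_ATTR, the second switches to
// SPH_GROUPBY_MULTIPLE and extends the spec to 'a, b'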
void SqlParser_c::SetGroupbyLimit ( int iLimit )
{
m_pQuery->m_iGroupbyLimit = iLimit;
}
bool SqlParser_c::AddDistinct ( SqlNode_t * pNewExpr, SqlNode_t * pStart, SqlNode_t * pEnd )
{
CSphString sDistinct;
ToString ( sDistinct, *pNewExpr );
if ( !m_pQuery->m_sGroupDistinct.IsEmpty() && m_pQuery->m_sGroupDistinct!=sDistinct )
{
yyerror ( this, "too many COUNT(DISTINCT) clauses" );
return false;
}
m_pQuery->m_sGroupDistinct = sDistinct;
return AddItem ( "@distinct", pStart, pEnd );
}
void SqlParser_c::AddDistinct ( SqlNode_t * pNewExpr )
{
if ( !pNewExpr )
{
m_pQuery->m_sGroupDistinct = "id";
}
else
{
ToString ( m_pQuery->m_sGroupDistinct, *pNewExpr );
sphColumnToLowercase ( const_cast<char *>( m_pQuery->m_sGroupDistinct.cstr() ) );
}
}
bool SqlParser_c::AddDistinctSort ( SqlNode_t * pNewExpr, SqlNode_t * pStart, SqlNode_t * pEnd, bool bSortAsc )
{
if ( !AddDistinct ( pNewExpr, pStart, pEnd ) )
return false;
m_pQuery->m_sOrderBy.SetSprintf ( "@distinct %s", ( bSortAsc ? "asc" : "desc" ) );
return true;
}
bool SqlParser_c::MaybeAddFacetDistinct()
{
if ( m_pQuery->m_sGroupDistinct.IsEmpty() )
return true;
// distinct could be already added by order by
if ( m_pQuery->m_dItems.Contains ( bind ( &CSphQueryItem::m_sExpr ), "@distinct" ) )
return true;
CSphQueryItem tItem;
tItem.m_sExpr = "@distinct";
tItem.m_eAggrFunc = SPH_AGGR_NONE;
tItem.m_sAlias.SetSprintf ( "count(distinct %s)", m_pQuery->m_sGroupDistinct.cstr() );
int iCountPos = m_pQuery->m_dItems.GetFirst ( [] ( const CSphQueryItem & tElem ) { return ( tElem.m_sExpr=="count(*)" ); });
if ( iCountPos==-1 )
{
yyerror ( this, "can not find COUNT clause" );
return false;
}
m_pQuery->m_dItems.Insert ( iCountPos, tItem );
return SetNewSyntax();
}
bool SqlParser_c::SetupFacetStmt()
{
m_pStmt->m_eStmt = STMT_FACET;
if ( m_pQuery->m_sFacetBy.IsEmpty() )
{
m_pQuery->m_sFacetBy = m_pQuery->m_sGroupBy;
AddCount();
}
return MaybeAddFacetDistinct();
}
bool SqlParser_c::AddSchemaItem ( YYSTYPE * pNode )
{
assert ( m_pStmt );
CSphString sItem;
sItem.SetBinary ( m_pBuf + pNode->m_iStart, pNode->m_iEnd - pNode->m_iStart );
return m_pStmt->AddSchemaItem ( sItem.cstr() );
}
bool SqlParser_c::SetMatch ( const YYSTYPE & tValue )
{
if ( m_bMatchClause )
{
yyerror ( this, "too many MATCH() clauses" );
return false;
}
m_bMatchClause = true;
m_pQuery->m_sQuery = ToStringUnescape ( tValue );
m_pQuery->m_sRawQuery = m_pQuery->m_sQuery;
return true;
}
bool SqlParser_c::AddMatch ( const SqlNode_t & tValue, const SqlNode_t & tIndex )
{
// the index from tIndex must be either in m_pQuery->m_sIndexes OR equal to m_pQuery->m_sJoinIdx;
// check it!
StrVec_t dQueryIndexes;
ParseIndexList ( m_pQuery->m_sIndexes, dQueryIndexes );
CSphString sMatchIndex;
ToString ( sMatchIndex, tIndex );
CSphString sError;
if ( dQueryIndexes.any_of ( [&sMatchIndex]( const CSphString & sIndex ){ return sIndex==sMatchIndex; } ) )
{
if ( m_bMatchClause )
{
sError.SetSprintf ( "Multiple MATCH() clauses for table '%s' found", sMatchIndex.cstr() );
yyerror ( this, sError.cstr() );
return false;
}
// it's a plain match() on the left index
m_pQuery->m_sQuery = ToStringUnescape(tValue);
m_pQuery->m_sRawQuery = m_pQuery->m_sQuery;
m_bMatchClause = true;
}
else if ( m_pQuery->m_sJoinIdx.Length() && m_pQuery->m_sJoinIdx==sMatchIndex )
{
if ( m_bJoinMatchClause )
{
sError.SetSprintf ( "Multiple MATCH() clauses for table '%s' found", sMatchIndex.cstr() );
yyerror ( this, sError.cstr() );
return false;
}
m_pQuery->m_sJoinQuery = ToStringUnescape(tValue);
m_bJoinMatchClause = true;
}
else
{
sError.SetSprintf ( "Unknown table '%s' found in MATCH() clause", sMatchIndex.cstr() );
yyerror ( this, sError.cstr() );
return false;
}
return true;
}
bool SqlParser_c::SetKNN ( const SqlNode_t & tAttr, const SqlNode_t & tK, const SqlNode_t & tValues, const SqlNode_t * pEf )
{
ToString ( m_pQuery->m_sKNNAttr, tAttr );
m_pQuery->m_iKNNK = tK.GetValueInt();
if ( pEf )
m_pQuery->m_iKnnEf = pEf->GetValueInt();
auto pValues = tValues.m_pValues;
if ( pValues )
{
m_pQuery->m_dKNNVec.Reserve ( pValues->GetLength() );
for ( auto & i : *pValues )
m_pQuery->m_dKNNVec.Add( i.m_fValue );
}
return true;
}
void SqlParser_c::AddConst ( int iList, const YYSTYPE& tValue )
{
CSphVector<CSphNamedInt> & dVec = GetNamedVec ( iList );
dVec.Add();
ToString ( dVec.Last().first, tValue ).ToLower();
dVec.Last().second = (int) tValue.GetValueInt();
}
void SqlParser_c::SetLocalStatement ( const YYSTYPE & tName )
{
m_pStmt->m_eStmt = STMT_SET;
m_pStmt->m_eSet = SET_LOCAL;
ToString ( m_pStmt->m_sSetName, tName );
}
void SqlParser_c::SwapSubkeys ()
{
m_pQuery->m_dIntSubkeys.SwapData ( m_pStmt->m_dIntSubkeys );
m_pQuery->m_dStringSubkeys.SwapData ( m_pStmt->m_dStringSubkeys );
}
void SqlParser_c::AddComment ( const SqlNode_t* tNode )
{
CSphString sComment;
ToString ( sComment, *tNode );
StrVec_t sParts;
sphSplit ( sParts, sComment.cstr(), " " );
for ( auto& sPart : sParts )
{
sPart.Trim();
sPart.ToUpper();
if ( sPart == "SQL_NO_CACHE" )
{
sphLogDebug ( "Found SQL_NO_CACHE, set limit=-1" );
SetLimit ( 0, -1 );
}
}
}
void SqlParser_c::GenericStatement ( SqlNode_t * pNode )
{
SwapSubkeys();
m_pStmt->m_iListStart = pNode->m_iStart;
m_pStmt->m_iListEnd = pNode->m_iEnd;
SetIndex ( *pNode );
}
void SqlParser_c::AddUpdatedAttr ( const SqlNode_t & tName, ESphAttr eType ) const
{
CSphAttrUpdate & tUpd = m_pStmt->AttrUpdate();
CSphString sAttr;
TypedAttribute_t & tNew = tUpd.m_dAttributes.Add();
tNew.m_sName = ToString ( sAttr, tName ).ToLower();
tNew.m_eType = eType;
}
void SqlParser_c::UpdateMVAAttr ( const SqlNode_t & tName, const SqlNode_t & dValues )
{
CSphAttrUpdate & tUpd = m_pStmt->AttrUpdate();
ESphAttr eType = SPH_ATTR_UINT32SET;
if ( dValues.m_pValues && dValues.m_pValues->GetLength()>0 )
{
bool bHaveInt64 = false;
bool bHaveFloat = false;
for ( auto tValue : *dValues.m_pValues )
{
bHaveInt64 |= tValue.m_iValue > UINT_MAX;
bHaveFloat |= tValue.m_bFloat;
}
eType = bHaveFloat ? SPH_ATTR_FLOAT_VECTOR : ( bHaveInt64 ? SPH_ATTR_INT64SET : SPH_ATTR_UINT32SET );
tUpd.m_dPool.Add ( dValues.m_pValues->GetLength()*2 );
for ( auto tValue : *dValues.m_pValues )
{
if ( eType==SPH_ATTR_FLOAT_VECTOR )
*((int64_t*)tUpd.m_dPool.AddN(2)) = sphF2DW ( tValue.m_fValue );
else
*((int64_t*)tUpd.m_dPool.AddN(2)) = tValue.m_iValue;
}
}
else
{
// no values, means we should delete the attribute
// we signal that to the update code by putting a single zero
// to the values pool (meaning a zero-length MVA values list)
tUpd.m_dPool.Add ( 0 );
}
AddUpdatedAttr ( tName, eType );
}
void SqlParser_c::UpdateStringAttr ( const SqlNode_t & tCol, const SqlNode_t & tStr )
{
CSphAttrUpdate & tUpd = m_pStmt->AttrUpdate();
auto sStr = ToStringUnescape ( tStr );
int iLength = sStr.Length();
tUpd.m_dPool.Add ( tUpd.m_dBlobs.GetLength() );
tUpd.m_dPool.Add ( iLength );
if ( iLength )
{
BYTE * pBlob = tUpd.m_dBlobs.AddN ( iLength+2 ); // a couple of extra \0 for json parser to be happy
memcpy ( pBlob, sStr.cstr(), iLength );
pBlob[iLength] = 0;
pBlob[iLength+1] = 0;
}
AddUpdatedAttr ( tCol, SPH_ATTR_STRING );
}
CSphFilterSettings * SqlParser_c::AddFilter ( const SqlNode_t & tCol, ESphFilter eType )
{
CSphString sCol;
ToString ( sCol, tCol ); // do NOT lowercase just yet, might have to retain case for JSON cols
FilterTreeItem_t & tElem = m_dFilterTree.Add();
tElem.m_iFilterItem = m_pQuery->m_dFilters.GetLength();
CSphFilterSettings * pFilter = &m_pQuery->m_dFilters.Add();
pFilter->m_sAttrName = sCol;
pFilter->m_eType = eType;
sphColumnToLowercase ( const_cast<char *>( pFilter->m_sAttrName.cstr() ) );
return pFilter;
}
static void CopyValuesToFilter ( CSphFilterSettings * pFilter, const RefcountedVector_c<AttrValue_t> & dValues )
{
if ( !pFilter )
return;
auto & dFV = pFilter->m_dValues;
dFV.Resize ( dValues.GetLength() );
ARRAY_FOREACH ( i, dValues )
dFV[i] = dValues[i].m_iValue;
dFV.Uniq();
}
CSphFilterSettings * SqlParser_c::AddFilter ( const SqlNode_t & tCol, ESphFilter eType, const RefcountedVector_c<AttrValue_t> & dValues )
{
auto pFilter = AddFilter ( tCol, eType );
CopyValuesToFilter ( pFilter, dValues );
return pFilter;
}
bool SqlParser_c::AddFloatRangeFilter ( const SqlNode_t & sAttr, float fMin, float fMax, bool bHasEqual, bool bExclude )
{
CSphFilterSettings * pFilter = AddFilter ( sAttr, SPH_FILTER_FLOATRANGE );
if ( !pFilter )
return false;
pFilter->m_fMinValue = fMin;
pFilter->m_fMaxValue = fMax;
pFilter->m_bHasEqualMin = bHasEqual;
pFilter->m_bHasEqualMax = bHasEqual;
pFilter->m_bExclude = bExclude;
return true;
}
bool SqlParser_c::AddFloatFilterGreater ( const SqlNode_t & tAttr, float fVal, bool bHasEqual )
{
CSphFilterSettings * pFilter = AddFilter ( tAttr, SPH_FILTER_FLOATRANGE );
if ( !pFilter )
return false;
pFilter->m_fMaxValue = FLT_MAX;
pFilter->m_fMinValue = fVal;
pFilter->m_bHasEqualMin = bHasEqual;
pFilter->m_bOpenRight = true;
return true;
}
bool SqlParser_c::AddFloatFilterLesser ( const SqlNode_t & tAttr, float fVal, bool bHasEqual )
{
CSphFilterSettings * pFilter = AddFilter ( tAttr, SPH_FILTER_FLOATRANGE );
if ( !pFilter )
return false;
pFilter->m_fMinValue = -FLT_MAX;
pFilter->m_fMaxValue = fVal;
pFilter->m_bHasEqualMax = bHasEqual;
pFilter->m_bOpenLeft = true;
return true;
}
bool SqlParser_c::AddIntRangeFilter ( const SqlNode_t & sAttr, int64_t iMin, int64_t iMax, bool bExclude )
{
CSphFilterSettings * pFilter = AddFilter ( sAttr, SPH_FILTER_RANGE );
if ( !pFilter )
return false;
pFilter->m_iMinValue = iMin;
pFilter->m_iMaxValue = iMax;
pFilter->m_bExclude = bExclude;
return true;
}
bool SqlParser_c::AddIntFilterGreater ( const SqlNode_t & tAttr, int64_t iVal, bool bHasEqual )
{
CSphFilterSettings * pFilter = AddFilter ( tAttr, SPH_FILTER_RANGE );
if ( !pFilter )
return false;
pFilter->m_iMaxValue = LLONG_MAX;
pFilter->m_iMinValue = iVal;
pFilter->m_bHasEqualMin = bHasEqual;
pFilter->m_bOpenRight = true;
return true;
}
bool SqlParser_c::AddIntFilterLesser ( const SqlNode_t & tAttr, int64_t iVal, bool bHasEqual )
{
CSphFilterSettings * pFilter = AddFilter ( tAttr, SPH_FILTER_RANGE );
if ( !pFilter )
return false;
pFilter->m_iMinValue = LLONG_MIN;
pFilter->m_iMaxValue = iVal;
pFilter->m_bHasEqualMax = bHasEqual;
pFilter->m_bOpenLeft = true;
return true;
}
bool SqlParser_c::AddUservarFilter ( const SqlNode_t & tCol, const SqlNode_t & tVar, bool bExclude )
{
CSphFilterSettings * pFilter = AddFilter ( tCol, SPH_FILTER_USERVAR );
if ( !pFilter )
return false;
CSphString & sUserVar = pFilter->m_dStrings.Add();
ToString ( sUserVar, tVar ).ToLower();
pFilter->m_bExclude = bExclude;
return true;
}
bool SqlParser_c::AddStringFilter ( const SqlNode_t & tCol, const SqlNode_t & tVal, bool bExclude )
{
CSphFilterSettings * pFilter = AddFilter ( tCol, SPH_FILTER_STRING );
if ( !pFilter )
return false;
CSphString & sFilterString = pFilter->m_dStrings.Add();
sFilterString = ToStringUnescape ( tVal );
pFilter->m_bExclude = bExclude;
return true;
}
bool SqlParser_c::AddStringCmpFilter ( const SqlNode_t & tCol, const SqlNode_t & tVal, bool bExclude, EStrCmpDir eStrCmpDir )
{
CSphFilterSettings * pFilter = AddFilter ( tCol, SPH_FILTER_STRING );
if ( !pFilter )
return false;
CSphString & sFilterString = pFilter->m_dStrings.Add();
sFilterString = ToStringUnescape ( tVal );
pFilter->m_eStrCmpDir = eStrCmpDir;
pFilter->m_bExclude = bExclude;
return true;
}
CSphFilterSettings * SqlParser_c::AddValuesFilter ( const SqlNode_t & tCol, const RefcountedVector_c<AttrValue_t> & dValues )
{
CSphFilterSettings * pFilter = AddFilter ( tCol, SPH_FILTER_VALUES );
CopyValuesToFilter ( pFilter, dValues );
return pFilter;
}
bool SqlParser_c::AddStringListFilter ( const SqlNode_t & tCol, SqlNode_t & tVal, StrList_e eType, bool bInverse )
{
CSphFilterSettings * pFilter = AddFilter ( tCol, SPH_FILTER_STRING_LIST );
if ( !pFilter || !tVal.m_pValues )
return false;
pFilter->m_dStrings.Resize ( tVal.m_pValues->GetLength() );
ARRAY_FOREACH ( i, ( *tVal.m_pValues ) )
{
uint64_t uVal = ( *tVal.m_pValues )[i].m_iValue;
int iOff = ( uVal>>32 );
int iLen = ( uVal & 0xffffffff );
pFilter->m_dStrings[i] = SqlUnescape ( m_pBuf + iOff, iLen );
}
tVal.m_pValues = nullptr;
pFilter->m_bExclude = bInverse;
assert ( pFilter->m_eMvaFunc == SPH_MVAFUNC_NONE ); // that is default for IN filter
if ( eType==StrList_e::STR_ANY )
pFilter->m_eMvaFunc = SPH_MVAFUNC_ANY;
else if ( eType==StrList_e::STR_ALL )
pFilter->m_eMvaFunc = SPH_MVAFUNC_ALL;
return true;
}
bool SqlParser_c::AddNullFilter ( const SqlNode_t & tCol, bool bEqualsNull )
{
CSphFilterSettings * pFilter = AddFilter ( tCol, SPH_FILTER_NULL );
if ( !pFilter )
return false;
pFilter->m_bIsNull = bEqualsNull;
return true;
}
void SqlParser_c::AddHaving ()
{
assert ( m_pQuery->m_dFilters.GetLength() );
m_pQuery->m_tHaving = m_pQuery->m_dFilters.Pop();
}
bool SqlParser_c::SetJoin ( const SqlNode_t & tIdx )
{
// set default join mode
if ( m_pQuery->m_eJoinType==JoinType_e::NONE )
m_pQuery->m_eJoinType = JoinType_e::INNER;
ToString ( m_pQuery->m_sJoinIdx, tIdx );
return true;
}
bool SqlParser_c::AddOnFilter ( const SqlNode_t & tIdx1, const SqlNode_t & tAttr1, const SqlNode_t & tIdx2, const SqlNode_t & tAttr2, int iTypeCast )
{
auto & tOn = m_pQuery->m_dOnFilters.Add();
ToString ( tOn.m_sIdx1, tIdx1 );
ToString ( tOn.m_sIdx2, tIdx2 );
ToString ( tOn.m_sAttr1, tAttr1 );
ToString ( tOn.m_sAttr2, tAttr2 );
if ( tOn.m_sAttr1.Begins(".") )
tOn.m_sAttr1 = tOn.m_sAttr1.SubString( 1, tOn.m_sAttr1.Length()-1 );
if ( tOn.m_sAttr2.Begins(".") )
tOn.m_sAttr2 = tOn.m_sAttr2.SubString( 1, tOn.m_sAttr2.Length()-1 );
sphColumnToLowercase ( const_cast<char*>( tOn.m_sAttr1.cstr() ) );
sphColumnToLowercase ( const_cast<char*>( tOn.m_sAttr2.cstr() ) );
if ( iTypeCast==0 )
tOn.m_eTypeCast1 = m_eJoinTypeCast;
if ( iTypeCast==1 )
tOn.m_eTypeCast2 = m_eJoinTypeCast;
m_eJoinTypeCast = SPH_ATTR_NONE;
return true;
}
void SqlParser_c::SetJoinType ( JoinType_e eType )
{
m_pQuery->m_eJoinType = eType;
}
bool SqlParser_c::IsGoodSyntax()
{
if ( ( m_uSyntaxFlags & 3 )!=3 )
return true;
yyerror ( this, "Mixing the old-fashion internal vars (@id, @count, @weight) with new acronyms like count(*), weight() is prohibited" );
return false;
}
int SqlParser_c::AllocNamedVec ()
{
// we only allow one such vector at a time, right now
assert ( !m_bNamedVecBusy );
m_bNamedVecBusy = true;
m_dNamedVec.Resize ( 0 );
return 0;
}
void SqlParser_c::SetLimit ( int iOffset, int iLimit )
{
m_pQuery->m_iOffset = iOffset;
m_pQuery->m_iLimit = iLimit;
}
#ifndef NDEBUG
CSphVector<CSphNamedInt> & SqlParser_c::GetNamedVec ( int iIndex )
#else
CSphVector<CSphNamedInt> & SqlParser_c::GetNamedVec ( int )
#endif
{
assert ( m_bNamedVecBusy && iIndex==0 );
return m_dNamedVec;
}
#ifndef NDEBUG
void SqlParser_c::FreeNamedVec ( int iIndex )
#else
void SqlParser_c::FreeNamedVec ( int )
#endif
{
assert ( m_bNamedVecBusy && iIndex==0 );
m_bNamedVecBusy = false;
m_dNamedVec.Resize ( 0 );
}
void SqlParser_c::SetOp ( SqlNode_t & tNode )
{
tNode.m_iParsedOp = m_dFilterTree.GetLength() - 1;
}
bool SqlParser_c::SetOldSyntax()
{
m_uSyntaxFlags |= 1;
return IsGoodSyntax ();
}
bool SqlParser_c::SetNewSyntax()
{
m_uSyntaxFlags |= 2;
return IsGoodSyntax();
}
bool SqlParser_c::IsDeprecatedSyntax() const
{
return m_uSyntaxFlags & 1;
}
void SqlParser_c::FilterGroup ( SqlNode_t & tNode, SqlNode_t & tExpr )
{
tNode.m_iParsedOp = tExpr.m_iParsedOp;
}
void SqlParser_c::FilterAnd ( SqlNode_t & tNode, const SqlNode_t & tLeft, const SqlNode_t & tRight )
{
tNode.m_iParsedOp = m_dFilterTree.GetLength();
FilterTreeItem_t & tElem = m_dFilterTree.Add();
tElem.m_iLeft = tLeft.m_iParsedOp;
tElem.m_iRight = tRight.m_iParsedOp;
}
void SqlParser_c::FilterOr ( SqlNode_t & tNode, const SqlNode_t & tLeft, const SqlNode_t & tRight )
{
tNode.m_iParsedOp = m_dFilterTree.GetLength();
m_bGotFilterOr = true;
FilterTreeItem_t & tElem = m_dFilterTree.Add();
tElem.m_bOr = true;
tElem.m_iLeft = tLeft.m_iParsedOp;
tElem.m_iRight = tRight.m_iParsedOp;
}
struct QueryItemProxy_t
{
DWORD m_uHash;
int m_iIndex;
CSphQueryItem * m_pItem;
bool operator < ( const QueryItemProxy_t & tItem ) const
{
return ( ( m_uHash<tItem.m_uHash ) || ( m_uHash==tItem.m_uHash && m_iIndex<tItem.m_iIndex ) );
}
bool operator == ( const QueryItemProxy_t & tItem ) const
{
return ( m_uHash==tItem.m_uHash );
}
void QueryItemHash ()
{
assert ( m_pItem );
m_uHash = sphCRC32 ( m_pItem->m_sAlias.cstr() );
m_uHash = sphCRC32 ( m_pItem->m_sExpr.cstr(), m_pItem->m_sExpr.Length(), m_uHash );
m_uHash = sphCRC32 ( (const void*)&m_pItem->m_eAggrFunc, sizeof(m_pItem->m_eAggrFunc), m_uHash );
}
};
static void CreateFilterTree ( const CSphVector<FilterTreeItem_t> & dOps, int iStart, int iCount, CSphQuery & tQuery )
{
bool bHasOr = false;
int iTreeCount = iCount - iStart;
CSphVector<FilterTreeItem_t> dTree ( iTreeCount );
for ( int i = 0; i<iTreeCount; i++ )
{
FilterTreeItem_t tItem = dOps[iStart + i];
tItem.m_iLeft = ( tItem.m_iLeft==-1 ? -1 : tItem.m_iLeft - iStart );
tItem.m_iRight = ( tItem.m_iRight==-1 ? -1 : tItem.m_iRight - iStart );
dTree[i] = tItem;
bHasOr |= ( tItem.m_iFilterItem==-1 && tItem.m_bOr );
}
// query has only plain AND filters - no need for filter tree
if ( !bHasOr )
return;
tQuery.m_dFilterTree.SwapData ( dTree );
}
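// note: the parser accumulates one flat dOps list across all statements of a multi-statement
// query; here the per-statement slice is copied out and its left/right child links are rebased
// to slice-local indexes, while a pure-AND slice leaves m_dFilterTree empty (fast path)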
struct HintComp_fn
{
bool IsLess ( const IndexHint_t & tA, const IndexHint_t & tB ) const
{
return strcasecmp ( tA.m_sIndex.cstr(), tB.m_sIndex.cstr() ) < 0;
}
bool IsEq ( const IndexHint_t & tA, const IndexHint_t & tB ) const
{
return tA.m_sIndex==tB.m_sIndex && tA.m_eType==tB.m_eType && tA.m_bForce==tB.m_bForce;
}
};
static bool CheckQueryHints ( CSphVector<IndexHint_t> & dHints, CSphString & sError )
{
sphSort ( dHints.Begin(), dHints.GetLength(), HintComp_fn() );
sphUniq ( dHints.Begin(), dHints.GetLength(), HintComp_fn() );
for ( int i = 1; i < dHints.GetLength(); i++ )
if ( dHints[i-1].m_sIndex==dHints[i].m_sIndex && dHints[i-1].m_eType==dHints[i].m_eType )
{
sError.SetSprintf ( "conflicting hints specified for table '%s'", dHints[i-1].m_sIndex.cstr() );
return false;
}
return true;
}
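// note: IsEq() above includes m_bForce, so sphUniq drops exact duplicates only; a surviving
// adjacent pair with the same table and hint type must differ in force/ignore - a conflict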
static ParseResult_e ParseNext ( Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError, bool bKeepError )
{
using ParserFN = ParseResult_e ( * ) ( Str_t sQuery, CSphVector<SqlStmt_t> & dStmt, CSphString & sError );
ParserFN dParsers[] = { ParseDdlEx, ParseSecond, ParseDebugCmd, ParseExtra };
CSphString sNewError;
ParseResult_e eRes;
for ( ParserFN pParser : dParsers )
{
if ( !dStmt.IsEmpty() )
dStmt.Pop();
eRes = pParser ( sQuery, dStmt, sNewError );
if ( eRes != ParseResult_e::PARSE_OK )
sphLogDebugv ( "%s", sNewError.cstr() );
if ( eRes == ParseResult_e::PARSE_ERROR && !bKeepError )
{
sError = sNewError;
bKeepError = true;
}
if ( eRes == ParseResult_e::PARSE_OK )
break;
}
return eRes;
}
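// note: each fallback parser re-reads the raw query after the dummy statement of the previous
// attempt is popped; only the first PARSE_ERROR message is latched (bKeepError), so the user
// sees the most relevant diagnostic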
static bool SetupFacets ( CSphVector<SqlStmt_t> & dStmt )
{
bool bGotFacet = false;
ARRAY_FOREACH ( i, dStmt )
{
const SqlStmt_t & tHeadStmt = dStmt[i];
const CSphQuery & tHeadQuery = tHeadStmt.m_tQuery;
if ( dStmt[i].m_eStmt!=STMT_SELECT )
continue;
++i;
if ( i<dStmt.GetLength() && dStmt[i].m_eStmt==STMT_FACET )
{
bGotFacet = true;
const_cast<CSphQuery &>(tHeadQuery).m_bFacetHead = true;
}
for ( ; i<dStmt.GetLength() && dStmt[i].m_eStmt==STMT_FACET; ++i )
{
SqlStmt_t & tStmt = dStmt[i];
tStmt.m_tQuery.m_bFacet = true;
tStmt.m_eStmt = STMT_SELECT;
tStmt.m_tQuery.m_sIndexes = tHeadQuery.m_sIndexes;
tStmt.m_tQuery.m_sSelect = tStmt.m_tQuery.m_sFacetBy;
tStmt.m_tQuery.m_sQuery = tHeadQuery.m_sQuery;
tStmt.m_tQuery.m_iMaxMatches = tHeadQuery.m_iMaxMatches;
tStmt.m_tQuery.m_sKNNAttr = tHeadQuery.m_sKNNAttr;
tStmt.m_tQuery.m_dKNNVec = tHeadQuery.m_dKNNVec;
tStmt.m_tQuery.m_sJoinIdx = tHeadQuery.m_sJoinIdx;
tStmt.m_tQuery.m_eJoinType = tHeadQuery.m_eJoinType;
tStmt.m_tQuery.m_dOnFilters = tHeadQuery.m_dOnFilters;
// append filters
ARRAY_FOREACH ( k, tHeadQuery.m_dFilters )
tStmt.m_tQuery.m_dFilters.Add ( tHeadQuery.m_dFilters[k] );
ARRAY_FOREACH ( k, tHeadQuery.m_dFilterTree )
tStmt.m_tQuery.m_dFilterTree.Add ( tHeadQuery.m_dFilterTree[k] );
}
}
return bGotFacet;
}
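// note: every FACET statement is rewritten into a full SELECT that shares the head query's
// text, tables, KNN and JOIN settings plus a copy of its filters; m_bFacetHead/m_bFacet then
// let the executor treat the batch as one multi-query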
static bool SetupFacetDistinct ( CSphVector<SqlStmt_t> & dStmt, CSphString & sError )
{
CSphString sDistinct;
// need to keep the order of query items the same as in the select list, yet without duplicating items;
// that is why a raw Vector.Uniq does not work here
CSphVector<QueryItemProxy_t> dSelectItems;
ARRAY_FOREACH ( i, dStmt )
{
CSphQuery & tQuery = dStmt[i].m_tQuery;
ARRAY_FOREACH ( k, tQuery.m_dItems )
{
QueryItemProxy_t & tItem = dSelectItems.Add();
tItem.m_pItem = tQuery.m_dItems.Begin() + k;
tItem.m_iIndex = dSelectItems.GetLength() - 1;
tItem.QueryItemHash();
}
if ( !tQuery.m_sGroupDistinct.IsEmpty() )
{
if ( !sDistinct.IsEmpty() && sDistinct!=tQuery.m_sGroupDistinct )
{
sError.SetSprintf ( "distinct field for all FACET queries should be the same '%s', query %d got '%s'", sDistinct.cstr(), i, tQuery.m_sGroupDistinct.cstr() );
return false;
}
if ( sDistinct.IsEmpty() )
sDistinct = tQuery.m_sGroupDistinct;
}
}
// get rid of duplicates
dSelectItems.Uniq();
// sort back to select list appearance order
dSelectItems.Sort ( bind ( &QueryItemProxy_t::m_iIndex ) );
// get merged select list
CSphVector<CSphQueryItem> dItems ( dSelectItems.GetLength() );
ARRAY_FOREACH ( i, dSelectItems )
{
dItems[i] = *dSelectItems[i].m_pItem;
}
for ( SqlStmt_t& tStmt : dStmt )
{
// keep original items
tStmt.m_tQuery.m_dItems.SwapData ( tStmt.m_tQuery.m_dRefItems );
tStmt.m_tQuery.m_dItems = dItems;
// for FACET strip off group by expression items
// these come after count(*)
if ( tStmt.m_tQuery.m_bFacet )
{
ARRAY_FOREACH ( j, tStmt.m_tQuery.m_dRefItems )
{
if ( tStmt.m_tQuery.m_dRefItems[j].m_sAlias=="count(*)" )
{
tStmt.m_tQuery.m_dRefItems.Resize ( j+1 );
break;
}
}
}
tStmt.m_tQuery.m_sGroupDistinct = sDistinct;
}
return true;
}
bool sphParseSqlQuery ( Str_t sQuery, CSphVector<SqlStmt_t> & dStmt, CSphString & sError, ESphCollation eCollation )
{
if ( !IsFilled ( sQuery ) )
{
sError = "query was empty";
return false;
}
SqlParser_c tParser ( dStmt, eCollation, sQuery.first, &sError );
char* sEnd = const_cast<char*> ( end ( sQuery ) );
sEnd[0] = 0; // prepare for yy_scan_buffer
sEnd[1] = 0; // this is ok because string allocates a small gap
yylex_init ( &tParser.m_pScanner );
YY_BUFFER_STATE tLexerBuffer = yy_scan_buffer ( const_cast<char *>( sQuery.first ), sQuery.second+2, tParser.m_pScanner );
if ( !tLexerBuffer )
{
sError = "internal error: yy_scan_buffer() failed";
return false;
}
// uncomment to see everything that comes to the parser.
// sphWarning ( "Query: %s", sQuery.first );
int iRes = yyparse ( &tParser );
yy_delete_buffer ( tLexerBuffer, tParser.m_pScanner );
yylex_destroy ( tParser.m_pScanner );
dStmt.Pop(); // last query is always dummy
// cascade parsing
if ( iRes || dStmt.IsEmpty() )
{
auto eNext = ParseNext ( sQuery, dStmt, sError, !tParser.IsWrongSyntaxError() );
if ( eNext == ParseResult_e::PARSE_OK )
sError = "";
else
sphLogDebug ( "%s", sError.cstr () );
return eNext == ParseResult_e::PARSE_OK;
}
int iFilterStart = 0;
int iFilterCount = 0;
ARRAY_FOREACH ( iStmt, dStmt )
{
// select expressions will be reparsed again, by an expression parser,
// when we have an index to actually bind variables, and create a tree
//
// so at SQL parse stage, we only do quick validation, and at this point,
// we just store the select list for later use by the expression parser
CSphQuery & tQuery = dStmt[iStmt].m_tQuery;
if ( tQuery.m_iSQLSelectStart>=0 )
{
if ( tQuery.m_iSQLSelectStart-1>=0 && tParser.m_pBuf[tQuery.m_iSQLSelectStart-1]=='`' )
tQuery.m_iSQLSelectStart--;
if ( tQuery.m_iSQLSelectEnd<sQuery.second && tParser.m_pBuf[tQuery.m_iSQLSelectEnd]=='`' )
tQuery.m_iSQLSelectEnd++;
tQuery.m_sSelect.SetBinary ( tParser.m_pBuf + tQuery.m_iSQLSelectStart,
tQuery.m_iSQLSelectEnd - tQuery.m_iSQLSelectStart );
}
// validate tablefuncs
// tablefuncs are searchd-level builtins rather than common expression-level functions
// so validation happens here, expression parser does not know tablefuncs (ignorance is bliss)
if ( dStmt[iStmt].m_eStmt==STMT_SELECT && !dStmt[iStmt].m_sTableFunc.IsEmpty() )
{
CSphString & sFunc = dStmt[iStmt].m_sTableFunc;
sFunc.ToUpper();
std::unique_ptr<ISphTableFunc> pFunc;
if ( sFunc=="REMOVE_REPEATS" )
pFunc = CreateRemoveRepeats();
if ( !pFunc )
{
sError.SetSprintf ( "unknown table function %s()", sFunc.cstr() );
return false;
}
if ( !pFunc->ValidateArgs ( dStmt[iStmt].m_dTableFuncArgs, tQuery, sError ) )
return false;
dStmt[iStmt].m_pTableFunc = std::move ( pFunc );
}
// validate filters
for ( const auto& tFilter : tQuery.m_dFilters )
{
const CSphString & sCol = tFilter.m_sAttrName;
if ( !strcasecmp ( sCol.cstr(), "@count" ) || !strcasecmp ( sCol.cstr(), "count(*)" ) )
{
sError.SetSprintf ( "sphinxql: aggregates in 'where' clause prohibited, use 'HAVING'" );
return false;
}
}
iFilterCount = tParser.m_dFiltersPerStmt[iStmt];
// all queries have only plain AND filters - no need for filter tree
if ( iFilterCount && tParser.m_bGotFilterOr )
CreateFilterTree ( tParser.m_dFilterTree, iFilterStart, iFilterCount, tQuery );
else
OptimizeFilters ( tQuery.m_dFilters );
iFilterStart = iFilterCount;
// fixup hints
if ( !CheckQueryHints ( tQuery.m_dIndexHints, sError ) )
return false;
}
if ( iRes!=0 || !dStmt.GetLength() )
return false;
if ( tParser.IsDeprecatedSyntax() )
{
sError = "Using the old-fashion @variables (@count, @weight, etc.) is deprecated";
return false;
}
if ( SetupFacets(dStmt) )
{
if ( !SetupFacetDistinct ( dStmt, sError ) )
return false;
}
else
{
// need to keep same wide result set schema
if ( dStmt.GetLength()>1 )
{
const CSphString & sDistinct = dStmt[0].m_tQuery.m_sGroupDistinct;
for ( int i=1; i<dStmt.GetLength(); i++ )
dStmt[i].m_tQuery.m_sGroupDistinct = sDistinct;
}
}
return true;
}
void SqlParser_SplitClusterIndex ( CSphString & sIndex, CSphString * pCluster )
{
if ( sIndex.IsEmpty() )
return;
const char * sDelimiter = strchr ( sIndex.cstr(), ':' );
if ( sDelimiter )
{
CSphString sTmp = sIndex; // m_sIndex.SetBinary can not accept this(m_sIndex) pointer
int iPos = int ( sDelimiter - sIndex.cstr() );
int iLen = sIndex.Length();
sIndex.SetBinary ( sTmp.cstr() + iPos + 1, iLen - iPos - 1 );
if ( pCluster )
pCluster->SetBinary ( sTmp.cstr(), iPos );
}
}
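// illustrative: 'posts' stays as is, while 'c1:posts' becomes sIndex='posts' and, when
// requested, *pCluster='c1'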
//////////////////////////////////////////////////////////////////////////
bool PercolateParseFilters ( const char * sFilters, ESphCollation eCollation, const CSphSchema & tSchema, CSphVector<CSphFilterSettings> & dFilters, CSphVector<FilterTreeItem_t> & dFilterTree, CSphString & sError )
{
if ( !sFilters || !*sFilters )
return true;
StringBuilder_c sBuf;
sBuf << "sysfilters " << sFilters;
int iLen = sBuf.GetLength();
CSphVector<SqlStmt_t> dStmt;
SqlParser_c tParser ( dStmt, eCollation, sBuf.cstr(), &sError );
tParser.m_sErrorHeader = "P06:";
char * sEnd = const_cast<char *>( sBuf.cstr() ) + iLen;
sEnd[0] = 0; // prepare for yy_scan_buffer
sEnd[1] = 0; // this is ok because string allocates a small gap
yylex_init ( &tParser.m_pScanner );
YY_BUFFER_STATE tLexerBuffer = yy_scan_buffer ( const_cast<char *>( sBuf.cstr() ), iLen+2, tParser.m_pScanner );
if ( !tLexerBuffer )
{
sError = "internal error: yy_scan_buffer() failed";
return false;
}
int iRes = yyparse ( &tParser );
yy_delete_buffer ( tLexerBuffer, tParser.m_pScanner );
yylex_destroy ( tParser.m_pScanner );
dStmt.Pop(); // last query is always dummy
if ( dStmt.GetLength()>1 )
{
sError.SetSprintf ( "internal error: too many filter statements, got %d", dStmt.GetLength() );
return false;
}
if ( dStmt.GetLength() && dStmt[0].m_eStmt!=STMT_SYSFILTERS )
{
sError.SetSprintf ( "internal error: not filter statement parsed, got %d", dStmt[0].m_eStmt );
return false;
}
if ( dStmt.GetLength() )
{
CSphQuery & tQuery = dStmt[0].m_tQuery;
int iFilterCount = tParser.m_dFiltersPerStmt[0];
CreateFilterTree ( tParser.m_dFilterTree, 0, iFilterCount, tQuery );
dFilters.SwapData ( tQuery.m_dFilters );
dFilterTree.SwapData ( tQuery.m_dFilterTree );
}
// maybe it's better to create a real filter instead of just checking the column name
if ( iRes==0 && dFilters.GetLength() )
{
ARRAY_FOREACH ( i, dFilters )
{
const CSphFilterSettings & tFilter = dFilters[i];
if ( tFilter.m_sAttrName.IsEmpty() )
{
sError.SetSprintf ( "bad filter %d name", i );
return false;
}
if ( tFilter.m_sAttrName.Begins ( "@" ) )
{
sError.SetSprintf ( "unsupported filter column '%s'", tFilter.m_sAttrName.cstr() );
return false;
}
const char * sAttrName = tFilter.m_sAttrName.cstr();
// might be a JSON.field
CSphString sJsonField;
const char * sJsonDot = strchr ( sAttrName, '.' );
if ( sJsonDot )
{
assert ( sJsonDot>sAttrName );
sJsonField.SetBinary ( sAttrName, int ( sJsonDot - sAttrName ) );
sAttrName = sJsonField.cstr();
}
int iCol = tSchema.GetAttrIndex ( sAttrName );
if ( iCol==-1 )
{
sError.SetSprintf ( "no such filter attribute '%s'", sAttrName );
return false;
}
}
}
// TODO: change way of filter -> expression create: produce single error, share parser code
// try expression
if ( iRes!=0 && !dFilters.GetLength() && sError.Begins ( "P06: syntax error" ) )
{
ESphAttr eAttrType = SPH_ATTR_NONE;
ExprParseArgs_t tExprArgs;
tExprArgs.m_pAttrType = &eAttrType;
tExprArgs.m_eCollation = eCollation;
ISphExprRefPtr_c pExpr { sphExprParse ( sFilters, tSchema, nullptr, sError, tExprArgs ) };
if ( pExpr )
{
sError = "";
iRes = 0;
CSphFilterSettings & tExpr = dFilters.Add();
tExpr.m_eType = SPH_FILTER_EXPRESSION;
tExpr.m_sAttrName = sFilters;
} else
{
return false;
}
}
return ( iRes==0 );
}
16,839 | sphinxstemru.cpp | manticoresoftware_manticoresearch/src/sphinxstemru.cpp
//
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxdefs.h" // for USE_LITTLE_ENDIAN
#include <string.h>
/////////////////////////////////////////////////////////////////////////////
// UTF-8 implementation
/////////////////////////////////////////////////////////////////////////////
#if USE_LITTLE_ENDIAN
struct RussianAlphabetUTF8_t
{
enum
{
A = 0xB0D0U,
B = 0xB1D0U,
V = 0xB2D0U,
G = 0xB3D0U,
D = 0xB4D0U,
E = 0xB5D0U,
YO = 0x91D1U,
ZH = 0xB6D0U,
Z = 0xB7D0U,
I = 0xB8D0U,
IY = 0xB9D0U,
K = 0xBAD0U,
L = 0xBBD0U,
M = 0xBCD0U,
N = 0xBDD0U,
O = 0xBED0U,
P = 0xBFD0U,
R = 0x80D1U,
S = 0x81D1U,
T = 0x82D1U,
U = 0x83D1U,
F = 0x84D1U,
H = 0x85D1U,
TS = 0x86D1U,
CH = 0x87D1U,
SH = 0x88D1U,
SCH = 0x89D1U,
TVY = 0x8AD1U, // TVYordiy znak (the hard sign)
Y = 0x8BD1U,
MYA = 0x8CD1U, // MYAgkiy znak (the soft sign)
EE = 0x8DD1U,
YU = 0x8ED1U,
YA = 0x8FD1U
};
};
#else
struct RussianAlphabetUTF8_t
{
enum
{
A = 0xD0B0U,
B = 0xD0B1U,
V = 0xD0B2U,
G = 0xD0B3U,
D = 0xD0B4U,
E = 0xD0B5U,
YO = 0xD191U,
ZH = 0xD0B6U,
Z = 0xD0B7U,
I = 0xD0B8U,
IY = 0xD0B9U,
K = 0xD0BAU,
L = 0xD0BBU,
M = 0xD0BCU,
N = 0xD0BDU,
O = 0xD0BEU,
P = 0xD0BFU,
R = 0xD180U,
S = 0xD181U,
T = 0xD182U,
U = 0xD183U,
F = 0xD184U,
H = 0xD185U,
TS = 0xD186U,
CH = 0xD187U,
SH = 0xD188U,
SCH = 0xD189U,
TVY = 0xD18AU, // TVYordiy znak (the hard sign)
Y = 0xD18BU,
MYA = 0xD18CU, // MYAgkiy znak (the soft sign)
EE = 0xD18DU,
YU = 0xD18EU,
YA = 0xD18FU
};
};
#endif
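// note: each constant packs the two UTF-8 bytes of a Cyrillic letter into an unsigned short in
// memory order; e.g. the letter A (U+0430) encodes as bytes D0 B0, which read back as 0xB0D0 on
// little-endian and 0xD0B0 on big-endian machines - hence the two variants of the table above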
#define LOC_CHAR_TYPE unsigned short
#define LOC_PREFIX(_a) _a##_utf8
#define RUS RussianAlphabetUTF8_t
#include "sphinxstemru.inl" // NOLINT 2nd include
/////////////////////////////////////////////////////////////////////////////
void stem_ru_init ()
{
stem_ru_init_utf8 ();
}
16,840 | aggregate.cpp | manticoresoftware_manticoresearch/src/aggregate.cpp
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "aggregate.h"
#include "schema/columninfo.h"
/// aggregate traits for different attribute types
template < typename T >
class AggrFunc_Traits_T : public AggrFunc_i
{
public:
explicit AggrFunc_Traits_T ( const CSphAttrLocator & tLocator ) : m_tLocator ( tLocator ) {}
T GetValue ( const CSphMatch & tRow );
void SetValue ( CSphMatch & tRow, T val );
protected:
CSphAttrLocator m_tLocator;
};
template<>
inline DWORD AggrFunc_Traits_T<DWORD>::GetValue ( const CSphMatch & tRow )
{
return (DWORD) tRow.GetAttr ( m_tLocator );
}
template<>
inline void AggrFunc_Traits_T<DWORD>::SetValue ( CSphMatch & tRow, DWORD val )
{
tRow.SetAttr ( m_tLocator, val );
}
template<>
inline int64_t AggrFunc_Traits_T<int64_t>::GetValue ( const CSphMatch & tRow )
{
return tRow.GetAttr ( m_tLocator );
}
template<>
inline void AggrFunc_Traits_T<int64_t>::SetValue ( CSphMatch & tRow, int64_t val )
{
tRow.SetAttr ( m_tLocator, val );
}
template<>
inline float AggrFunc_Traits_T<float>::GetValue ( const CSphMatch & tRow )
{
return tRow.GetAttrFloat ( m_tLocator );
}
template<>
inline void AggrFunc_Traits_T<float>::SetValue ( CSphMatch & tRow, float val )
{
tRow.SetAttrFloat ( m_tLocator, val );
}
template<>
inline double AggrFunc_Traits_T<double>::GetValue ( const CSphMatch & tRow )
{
return tRow.GetAttrDouble ( m_tLocator );
}
template<>
inline void AggrFunc_Traits_T<double>::SetValue ( CSphMatch & tRow, double val )
{
tRow.SetAttrDouble ( m_tLocator, val );
}
template < typename T >
class AggrColumnar_Traits_T : public AggrFunc_Traits_T<T>
{
using BASE = AggrFunc_Traits_T<T>;
public:
AggrColumnar_Traits_T ( const CSphAttrLocator & tLoc, const CSphString & sAttr )
: AggrFunc_Traits_T<T> ( tLoc )
, m_sAttr ( sAttr )
{}
void SetColumnar ( columnar::Columnar_i * pColumnar ) final
{
if ( pColumnar )
{
std::string sError; // FIXME! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_sAttr.cstr(), sError );
columnar::AttrInfo_t tAttrInfo;
if ( pColumnar->GetAttrInfo ( m_sAttr.cstr(), tAttrInfo ) )
m_eType = tAttrInfo.m_eType;
}
else
m_pIterator.reset();
}
void Setup ( CSphMatch & tDst, const CSphMatch & tSrc, bool bMerge ) final
{
BASE::SetValue ( tDst, FetchValue ( tSrc, bMerge ) );
}
protected:
CSphString m_sAttr;
common::AttrType_e m_eType = common::AttrType_e::NONE;
std::unique_ptr<columnar::Iterator_i> m_pIterator;
inline T FetchValue ( const CSphMatch & tSrc, bool bMerge )
{
if ( bMerge )
return BASE::GetValue(tSrc);
if ( m_eType==common::AttrType_e::FLOAT )
return (T)sphDW2F ( (DWORD)m_pIterator->Get ( tSrc.m_tRowID ) );
return (T)m_pIterator->Get ( tSrc.m_tRowID );
}
};
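// note on FetchValue() above: when merging groups produced by other sorters (bMerge), the
// accumulated value already sits in the match row, so it is read via the locator; otherwise
// the raw value comes from the columnar iterator, with a bit-cast for FLOAT attributes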
/// SUM() implementation
template < typename T >
class AggrSum_T final : public AggrFunc_Traits_T<T>
{
using BASE = AggrFunc_Traits_T<T>;
using BASE::BASE;
public:
explicit AggrSum_T ( const CSphAttrLocator & tLoc ) : AggrFunc_Traits_T<T> ( tLoc )
{}
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::GetValue(tSrc);
T tDstValue = BASE::GetValue(tDst);
if ( tSrcValue )
BASE::SetValue ( tDst, tDstValue+tSrcValue );
}
};
template < typename T >
class AggrSumColumnar_T final : public AggrColumnar_Traits_T<T>
{
using BASE = AggrColumnar_Traits_T<T>;
using BASE::BASE;
public:
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::FetchValue ( tSrc, bMerge );
T tDstValue = BASE::GetValue(tDst);
if ( tSrcValue )
BASE::SetValue ( tDst, tDstValue+tSrcValue );
}
};
/// AVG() implementation
template < typename T >
class AggrAvg_T final : public AggrFunc_Traits_T<T>
{
CSphAttrLocator m_tCountLoc;
using AggrFunc_Traits_T<T>::GetValue;
using AggrFunc_Traits_T<T>::SetValue;
public:
AggrAvg_T ( const CSphAttrLocator & tLoc, const CSphAttrLocator & tCountLoc )
: AggrFunc_Traits_T<T> ( tLoc ), m_tCountLoc ( tCountLoc )
{}
void Ungroup ( CSphMatch & tDst ) final
{
SetValue ( tDst, T ( GetValue ( tDst ) * tDst.GetAttr ( m_tCountLoc ) ) );
}
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = GetValue(tSrc);
T tDstValue = GetValue(tDst);
if ( bGrouped )
SetValue ( tDst, tDstValue + T ( tSrcValue*tSrc.GetAttr(m_tCountLoc) ) );
else if ( tSrcValue )
SetValue ( tDst, tDstValue+tSrcValue );
}
void Finalize ( CSphMatch & tDst ) final
{
auto uAttr = tDst.GetAttr ( m_tCountLoc );
if ( uAttr )
SetValue ( tDst, T ( GetValue(tDst) / uAttr ) );
}
};
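// note: AVG is kept as a running mean - Ungroup() multiplies it back into a sum before
// regrouping, Update() accumulates sums (weighting already-grouped rows by their counts),
// and Finalize() divides by the accumulated count again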
template < typename T >
class AggrAvgColumnar_T final : public AggrColumnar_Traits_T<T>
{
using BASE = AggrColumnar_Traits_T<T>;
using BASE::GetValue;
using BASE::SetValue;
public:
AggrAvgColumnar_T ( const CSphAttrLocator & tLoc, const CSphString & sAttr, const CSphAttrLocator & tCountLoc )
: BASE ( tLoc, sAttr )
, m_tCountLoc ( tCountLoc )
{}
void Ungroup ( CSphMatch & tDst ) final
{
SetValue ( tDst, T ( GetValue ( tDst ) * tDst.GetAttr ( m_tCountLoc ) ) );
}
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::FetchValue ( tSrc, bMerge );
T tDstValue = GetValue(tDst);
if ( bGrouped )
SetValue ( tDst, tDstValue + T( tSrcValue*tSrc.GetAttr(m_tCountLoc) ) );
else if ( tSrcValue )
SetValue ( tDst, tDstValue + tSrcValue );
}
void Finalize ( CSphMatch & tDst ) final
{
auto uAttr = tDst.GetAttr ( m_tCountLoc );
if ( uAttr )
SetValue ( tDst, T ( GetValue(tDst) / uAttr ) );
}
private:
CSphAttrLocator m_tCountLoc;
};
/// MAX() implementation
template < typename T >
class AggrMax_T final : public AggrFunc_Traits_T<T>
{
using BASE=AggrFunc_Traits_T<T>;
using BASE::BASE;
public:
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::GetValue(tSrc);
T tDstValue = BASE::GetValue(tDst);
if ( tSrcValue>tDstValue )
BASE::SetValue ( tDst, tSrcValue );
}
};
template < typename T >
class AggrMaxColumnar_T final : public AggrColumnar_Traits_T<T>
{
using BASE = AggrColumnar_Traits_T<T>;
using BASE::BASE;
public:
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::FetchValue ( tSrc, bMerge );
T tDstValue = BASE::GetValue(tDst);
if ( tSrcValue>tDstValue )
BASE::SetValue ( tDst, tSrcValue );
}
};
/// MIN() implementation
template < typename T >
class AggrMin_T final : public AggrFunc_Traits_T<T>
{
using BASE = AggrFunc_Traits_T<T>;
using BASE::BASE;
public:
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::GetValue(tSrc);
T tDstValue = BASE::GetValue(tDst);
if ( tSrcValue<tDstValue )
BASE::SetValue ( tDst, tSrcValue );
}
};
template < typename T >
class AggrMinColumnar_T final : public AggrColumnar_Traits_T<T>
{
using BASE = AggrColumnar_Traits_T<T>;
using BASE::BASE;
public:
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
T tSrcValue = BASE::FetchValue ( tSrc, bMerge );
T tDstValue = BASE::GetValue(tDst);
if ( tSrcValue<tDstValue )
BASE::SetValue ( tDst, tSrcValue );
}
};
/// GROUP_CONCAT
/* What is that magic about?
*
* In the simplest use case you have matches 'foo' and 'bar' in a group -> group_concat produces 'foo,bar' - no magic.
*
* Make it a bit more complex: one and the same sorter sequentially processes several indexes (chunks) and collects the group result.
* In this case, if the 1-st chunk gives 'foo' and the 2-nd gives 'bar', you can still achieve 'foo,bar' naturally, no magic.
*
* A bit more complex still: several sorters process a cloud of chunks in parallel, then merge results.
* Say, you have 3 chunks, giving 'foo', 'bar' and 'bazz'. The result you expect is 'foo,bar,bazz'.
* With, say, 2 sorters working in parallel, one processes the 1-st and 3-rd chunks, and the second - the middle one.
* One gives you 'foo,bazz', the second - 'bar'.
*
* What is to do on merge then?
*
* Simplest: just merge 'as is'. I.e., return 'foo,bazz,bar' despite the broken order.
* However that is appropriate only in the narrow case when ordering is not requested. That is *NOT* our way.
*
* Each match coming from a chunk is tagged with the order number of that chunk.
* When we have matches from the same chunk, we just group them the usual way with no magic, naturally concatenating strings.
* If all the matches are tagged the same - we just get the usual string out of the box, with no magic at all.
* If a match with another tag comes into a non-empty group, we use this blob layout:
*
* '\0', <N> <TAG1> <STRLEN1> chars1 <TAG2> <STRLEN2> chars2 ... <TAGN> <STRLEN> bytesN
*
* First \0 marks that the whole blob is special.
* Each tagged string inside includes concatenated values of the matches from that tag.
* For described foo-bar-baz in two sorters it will look like:
*
* '\0' 2 1 3 foo 3 4 bazz // in the 1-st sorter. 2 chunks, from tag 1 with len 3 'foo', from tag 3 with len 4 'bazz'
* bar // in the 2-nd sorter. Simple plain 'bar' (tag is not saved here, it is still an attribute of the match itself).
*
* Then we can merge the results, taking the tag for the value coming from the 2-nd sorter from the match itself.
*
* '\0' 3 1 3 foo 2 3 bar 3 4 bazz
*
* That finally deserializes into expected user string 'foo,bar,bazz'. So, target achieved.
*
* One optimization here is that we expect matches with monotonically changing tags. I.e., if we have processed chunk 1,
* and are now on chunk 3 - then the next match will never come with tag 1 or 2, as those numbers have already passed.
* So there is no need to 'insert into the middle'; we always push new data to the tail of the blob. That makes everything simpler.
*
*/
// helpers to blob serialization
using BStream_c = TightPackedVec_T<BYTE>;
static BStream_c & operator<< ( BStream_c & dOut, const ByteBlob_t & tData )
{
dOut.Append ( tData.first, tData.second );
return dOut;
}
template <typename NUM>
static BStream_c & operator<< ( BStream_c & dOut, NUM iNum )
{
sphUnalignedWrite ( dOut.AddN ( sizeof ( NUM ) ), iNum );
return dOut;
}
// unused for now
/*
template<typename T>
static BStream_c & operator<< ( BStream_c & dOut, const VecTraits_T<T> & tData )
{
dOut << tData.GetLength ();
tData.Apply ( [&dOut] ( const T & tChunk ) { dOut << tChunk; } );
return dOut;
}
*/
static BStream_c & operator<< ( BStream_c & dOut, const VecTraits_T<BYTE> & tData )
{
return dOut << tData.GetLength () << ByteBlob_t { tData.begin(), tData.GetLength () };
}
// unused for now
/*
static BStream_c & operator<< ( BStream_c & dOut, const CSphString& sData )
{
return dOut << VecTraits_T<BYTE> ( (BYTE*) const_cast<char*>( sData.cstr() ), sData.Length() );
}
static BStream_c & operator<< ( BStream_c & dOut, const StringBuilder_c & sData )
{
return dOut << VecTraits_T<BYTE> ( (BYTE*) const_cast<char*>( sData.cstr() ), sData.GetLength () );
}
*/
// helpers to de-serialize
template<typename NUM>
static ByteBlob_t & operator>> ( ByteBlob_t & dIn, NUM & iNum )
{
assert ( dIn.first );
assert ( dIn.second>=(int)sizeof (NUM) );
iNum = sphUnalignedRead ( *(const NUM *) dIn.first );
dIn.first += sizeof ( NUM );
dIn.second -= sizeof ( NUM );
return dIn;
}
static ByteBlob_t & operator>> ( ByteBlob_t & dIn, ByteBlob_t & tData )
{
assert ( dIn.first );
assert ( dIn.second>=tData.second );
tData.first = dIn.first;
dIn.first += tData.second;
dIn.second -= tData.second;
return dIn;
}
// unused for now
/*template<typename T>
static ByteBlob_t & operator>> ( ByteBlob_t & dIn, CSphVector<T> & tData )
{
int iLen;
dIn >> iLen;
tData.Resize ( iLen );
tData.Apply ( [&dIn] ( T & tChunk ) { dIn >> tChunk; } );
return dIn;
}
*/
static ByteBlob_t & operator>> ( ByteBlob_t & dIn, VecTraits_T<BYTE> & tData )
{
int iLen;
dIn >> iLen;
ByteBlob_t tChunk { nullptr, iLen };
dIn >> tChunk;
tData = tChunk;
return dIn;
}
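// A minimal round-trip sketch for the operators above (commented out; illustrative only,
// assuming the vector base exposes Begin()/GetLength() as used elsewhere in this file):
//
//	BStream_c dOut;
//	dOut << 42;                                        // raw int, no length prefix
//	ByteBlob_t dIn { dOut.Begin(), dOut.GetLength() };
//	int iNum = 0;
//	dIn >> iNum;                                       // iNum==42; dIn now points past it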
// The GROUP_CONCAT() implementation
class AggrConcat_c final : public AggrFunc_i
{
CSphAttrLocator m_tLoc;
public:
explicit AggrConcat_c ( const CSphColumnInfo & tCol )
: m_tLoc ( tCol.m_tLocator )
{
assert ( tCol.m_eAttrType==SPH_ATTR_STRINGPTR );
assert ( !m_tLoc.IsBlobAttr ()); // otherwise we will fail on fetching data!
}
// here we convert back to plain string
void Finalize ( CSphMatch & tMatch ) final
{
auto dSrc = tMatch.FetchAttrData ( m_tLoc, nullptr ); // expect serialized tagged strings
// empty match
if ( !dSrc.first )
return;
// already grouped match
if ( *dSrc.first )
return;
auto dBlob = dSrc;
int iSize, iTag, iFinalSize;
BStream_c dOut;
VecTraits_T<BYTE> dString;
BYTE uZero; dBlob >> uZero;
dBlob >> iSize;
iFinalSize = dBlob.second - ( iSize * 2 * sizeof ( int ) ) + iSize - 1 + 20; // -tag, -len, +commas-1, +packlen
dOut.Reserve ( iFinalSize );
for ( int i=0; i<iSize; ++i )
{
if ( i>0 ) dOut << ',';
dBlob >> iTag >> dString;
dOut << ByteBlob_t { dString.begin (), dString.GetLength () }; // write raw blob, without length
}
// release previous, write converted
sphDeallocatePacked ( sphPackedBlob ( dSrc ) );
sphPackPtrAttrInPlace ( dOut );
tMatch.SetAttr ( m_tLoc, (SphAttr_t) dOut.LeakData () );
}
void Update ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge ) final
{
ByteBlob_t dSrc = tSrc.FetchAttrData ( m_tLoc, nullptr ); // ok since it is NOT a blob attr
ByteBlob_t dDst = tDst.FetchAttrData ( m_tLoc, nullptr );
// empty source? kinda weird, but done!
if ( !dSrc.first || !dSrc.second )
return;
BStream_c dOut;
if ( !dDst.first )
dOut << dSrc;
else if ( *dSrc.first && *dDst.first ) // first byte is a mark: 0 means data packed, another is part of real string.
AppendStringToString ( dOut, dDst, tDst.m_iTag, dSrc, tSrc.m_iTag );
else if ( *dSrc.first && !*dDst.first )
AppendBlobToString ( dOut, dSrc, tSrc.m_iTag, dDst, false );
else if ( !*dSrc.first && *dDst.first )
AppendBlobToString ( dOut, dDst, tDst.m_iTag, dSrc, true );
else // if ( !*dSrc.first && !*dDst.first )
AppendBlobToBlob ( dOut, dDst, dSrc );
// Dispose previous packet
sphDeallocatePacked ( sphPackedBlob ( dDst ) );
// update saved data
sphPackPtrAttrInPlace (dOut);
tDst.SetAttr ( m_tLoc, (SphAttr_t) dOut.LeakData () );
}
private:
// merge two simple matches
static void AppendStringToString ( BStream_c & dOut, const ByteBlob_t & dInDst, int iTagDst, const ByteBlob_t & dInSrc, int iTagSrc )
{
if ( iTagDst==iTagSrc ) // plain concat of 2 strings
dOut << dInDst << ',' << dInSrc;
else // produce complex match
dOut << '\0' << int(2)
<< iTagDst << dInDst.second << dInDst
<< iTagSrc << dInSrc.second << dInSrc;
}
	static void WriteCount ( BStream_c& dOut, int iCount )
	{
		// patch the total num of elements into the placeholder right after the leading '\0' mark
		int iCurrentLen = dOut.GetLength ();
		dOut.Resize ( 1 ); // rewind to just past the '\0'; the payload bytes stay intact
		dOut << iCount;
		dOut.Resize ( iCurrentLen ); // restore the full length
	}
// merge two complex matches
static void AppendBlobToBlob ( BStream_c& dOut, ByteBlob_t dInDst, ByteBlob_t dInSrc )
{
int iOut = 0;
		dOut << '\0' << iOut; // '\0' marks a complex blob; iOut is a placeholder for the num of elems
int iSizeSrc = 0, iSizeDst = 0, iTagSrc, iTagDst;
VecTraits_T<BYTE> dBlobSrc, dBlobDst;
// read num of elements in both matches
char cZero;
dInSrc >> cZero >> iSizeSrc;
assert ( cZero=='\0' );
dInDst >> cZero >> iSizeDst;
assert ( cZero=='\0' );
auto fnNextSrc = [&] { if (iSizeSrc<=0) iTagSrc = INT_MIN; else {dInSrc >> iTagSrc >> dBlobSrc; --iSizeSrc;} };
auto fnNextDst = [&] { if (iSizeDst<=0) iTagDst = INT_MIN; else {dInDst >> iTagDst >> dBlobDst; --iSizeDst;} };
// merge two matches
fnNextSrc ();
fnNextDst ();
while ( iTagSrc!=INT_MIN || iTagDst!=INT_MIN )
{
if ( iTagSrc < iTagDst ) {
dOut << iTagDst << dBlobDst;
fnNextDst();
} else if ( iTagDst < iTagSrc ) {
dOut << iTagSrc << dBlobSrc;
fnNextSrc();
} else {
				assert ( iTagSrc!=INT_MIN && iTagDst!=INT_MIN ); // tags are equal here, so neither side may be the exhausted sentinel
dOut << iTagSrc;
if ( dBlobDst.IsEmpty() )
dOut << dBlobSrc;
else
dOut << dBlobDst.GetLength() + dBlobSrc.GetLength() + 1
<< ByteBlob_t ( dBlobDst.begin(), dBlobDst.GetLength() )
<< ',' << ByteBlob_t ( dBlobSrc.begin(), dBlobSrc.GetLength() );
fnNextSrc();
fnNextDst();
}
++iOut;
}
WriteCount ( dOut, iOut );
}
	// merge a string and a blob. The last bool determines what comes first
static void AppendBlobToString ( BStream_c & dOut, const ByteBlob_t & dString, int iTagString, ByteBlob_t dBlob, bool bStringFirst=true )
{
int iOut;
char cZero;
dBlob >> cZero >> iOut;
assert ( cZero=='\0' );
		dOut << cZero << iOut; // '\0' marks a complex blob; iOut is a placeholder for the num of elems
int iTagSrc;
VecTraits_T<BYTE> dBlobSrc;
bool bCopied = false;
// copy elems looking for the place of new one
for ( int i=0, iOldLen=iOut; i<iOldLen; ++i)
{
dBlob >> iTagSrc >> dBlobSrc;
if ( bCopied )
dOut << iTagSrc << dBlobSrc;
else
{
if ( !bCopied && iTagString > iTagSrc )
{
dOut << iTagString << dString.second << dString << iTagSrc << dBlobSrc;
++iOut;
bCopied = true;
} else if ( !bCopied && iTagString==iTagSrc )
{
dOut << iTagString << dString.second + dBlobSrc.GetLength() + 1;
if ( bStringFirst )
dOut << dString << ',' << ByteBlob_t ( dBlobSrc.begin(), dBlobSrc.GetLength() );
else
dOut << ByteBlob_t ( dBlobSrc.begin(), dBlobSrc.GetLength() ) << ',' << dString;
bCopied = true;
} else
dOut << iTagSrc << dBlobSrc;
}
}
if ( !bCopied )
{
dOut << iTagString << dString.second << dString;
++iOut;
}
WriteCount ( dOut, iOut );
}
};
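// A recap of the Update() dispatch above (illustrative summary, not new logic):
//   empty dst        -> copy src as-is
//   string + string  -> plain comma-concat if tags match, else start a tagged blob
//   string + blob    -> weave the string into the blob at its tag slot (either order)
//   blob + blob      -> merge the two blobs tag-by-tag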
/////////////////////////////////////////////////////////////////////
static bool GetColumnarCol ( const CSphColumnInfo & tAttr, CSphString & sColumnarCol )
{
if ( tAttr.m_pExpr && tAttr.m_eStage==SPH_EVAL_SORTER )
{
tAttr.m_pExpr->Command ( SPH_EXPR_GET_COLUMNAR_COL, &sColumnarCol );
return !sColumnarCol.IsEmpty();
}
return false;
}
AggrFunc_i * CreateAggrSum ( const CSphColumnInfo & tAttr )
{
assert ( tAttr.m_eAggrFunc==SPH_AGGR_SUM );
CSphString sColumnarCol;
bool bColumnar = GetColumnarCol ( tAttr, sColumnarCol );
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
case SPH_ATTR_TIMESTAMP:
if ( bColumnar )
return new AggrSumColumnar_T<DWORD> ( tAttr.m_tLocator, sColumnarCol );
return new AggrSum_T<DWORD> ( tAttr.m_tLocator );
case SPH_ATTR_BIGINT:
if ( bColumnar )
return new AggrSumColumnar_T<int64_t> ( tAttr.m_tLocator, sColumnarCol );
return new AggrSum_T<int64_t> ( tAttr.m_tLocator );
case SPH_ATTR_FLOAT:
if ( bColumnar )
return new AggrSumColumnar_T<float> ( tAttr.m_tLocator, sColumnarCol );
return new AggrSum_T<float> ( tAttr.m_tLocator );
default:
assert ( 0 && "internal error: unhandled aggregate type" );
return nullptr;
}
}
AggrFunc_i * CreateAggrAvg ( const CSphColumnInfo & tAttr, const CSphAttrLocator & tCount )
{
assert ( tAttr.m_eAggrFunc==SPH_AGGR_AVG );
CSphString sColumnarCol;
bool bColumnar = GetColumnarCol ( tAttr, sColumnarCol );
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
case SPH_ATTR_TIMESTAMP:
if ( bColumnar )
return new AggrAvgColumnar_T<DWORD> ( tAttr.m_tLocator, sColumnarCol, tCount );
return new AggrAvg_T<DWORD> ( tAttr.m_tLocator, tCount );
case SPH_ATTR_BIGINT:
if ( bColumnar )
return new AggrAvgColumnar_T<int64_t> ( tAttr.m_tLocator, sColumnarCol, tCount );
return new AggrAvg_T<int64_t> ( tAttr.m_tLocator, tCount );
case SPH_ATTR_FLOAT:
if ( bColumnar )
return new AggrAvgColumnar_T<float> ( tAttr.m_tLocator, sColumnarCol, tCount );
return new AggrAvg_T<float> ( tAttr.m_tLocator, tCount );
case SPH_ATTR_DOUBLE:
if ( bColumnar )
return new AggrAvgColumnar_T<double> ( tAttr.m_tLocator, sColumnarCol, tCount );
return new AggrAvg_T<double> ( tAttr.m_tLocator, tCount );
default:
assert ( 0 && "internal error: unhandled aggregate type" );
return nullptr;
}
}
AggrFunc_i * CreateAggrMin ( const CSphColumnInfo & tAttr )
{
assert ( tAttr.m_eAggrFunc==SPH_AGGR_MIN );
CSphString sColumnarCol;
bool bColumnar = GetColumnarCol ( tAttr, sColumnarCol );
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
case SPH_ATTR_TIMESTAMP:
if ( bColumnar )
return new AggrMinColumnar_T<DWORD> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMin_T<DWORD> ( tAttr.m_tLocator );
case SPH_ATTR_BIGINT:
if ( bColumnar )
return new AggrMinColumnar_T<int64_t> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMin_T<int64_t> ( tAttr.m_tLocator );
case SPH_ATTR_FLOAT:
if ( bColumnar )
return new AggrMinColumnar_T<float> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMin_T<float> ( tAttr.m_tLocator );
case SPH_ATTR_DOUBLE:
if ( bColumnar )
return new AggrMinColumnar_T<double> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMin_T<double> ( tAttr.m_tLocator );
default:
assert ( 0 && "internal error: unhandled aggregate type" );
return nullptr;
}
}
AggrFunc_i * CreateAggrMax ( const CSphColumnInfo & tAttr )
{
assert ( tAttr.m_eAggrFunc==SPH_AGGR_MAX );
CSphString sColumnarCol;
bool bColumnar = GetColumnarCol ( tAttr, sColumnarCol );
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
case SPH_ATTR_TIMESTAMP:
if ( bColumnar )
return new AggrMaxColumnar_T<DWORD> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMax_T<DWORD> ( tAttr.m_tLocator );
case SPH_ATTR_BIGINT:
if ( bColumnar )
return new AggrMaxColumnar_T<int64_t> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMax_T<int64_t> ( tAttr.m_tLocator );
case SPH_ATTR_FLOAT:
if ( bColumnar )
return new AggrMaxColumnar_T<float> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMax_T<float> ( tAttr.m_tLocator );
case SPH_ATTR_DOUBLE:
if ( bColumnar )
return new AggrMaxColumnar_T<double> ( tAttr.m_tLocator, sColumnarCol );
return new AggrMax_T<double> ( tAttr.m_tLocator );
default:
assert ( 0 && "internal error: unhandled aggregate type" );
return nullptr;
}
}
/////////////////////////////////////////////////////////////////////
AggrFunc_i * CreateAggrConcat ( const CSphColumnInfo & tAttr )
{
return new AggrConcat_c(tAttr);
}
| 22,898 | C++ | .cpp | 677 | 31.285081 | 138 | 0.693935 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,841 | compressed_zstd_mysql.cpp | manticoresoftware_manticoresearch/src/compressed_zstd_mysql.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "compressed_zstd_mysql.h"
#include "compressed_mysql_layer.h"
#include <zstd.h>
#if DL_ZSTD
static decltype ( &ZSTD_createCCtx ) sph_ZSTD_createCCtx = nullptr;
static decltype ( &ZSTD_createDCtx ) sph_ZSTD_createDCtx = nullptr;
static decltype ( &ZSTD_freeDCtx ) sph_ZSTD_freeDCtx = nullptr;
static decltype ( &ZSTD_freeCCtx ) sph_ZSTD_freeCCtx = nullptr;
static decltype ( &ZSTD_compressBound ) sph_ZSTD_compressBound = nullptr;
static decltype ( &ZSTD_compressCCtx ) sph_ZSTD_compressCCtx = nullptr;
static decltype ( &ZSTD_decompressDCtx ) sph_ZSTD_decompressDCtx = nullptr;
static decltype ( &ZSTD_isError ) sph_ZSTD_isError = nullptr;
static bool InitDynamicZstd()
{
const char* sFuncs[] = { "ZSTD_createCCtx", "ZSTD_createDCtx", "ZSTD_freeDCtx", "ZSTD_freeCCtx", "ZSTD_compressBound", "ZSTD_compressCCtx", "ZSTD_decompressDCtx", "ZSTD_isError" };
void** pFuncs[] = { (void**)&sph_ZSTD_createCCtx, (void**)&sph_ZSTD_createDCtx, (void**)&sph_ZSTD_freeDCtx, (void**)&sph_ZSTD_freeCCtx, (void**)&sph_ZSTD_compressBound, (void**)&sph_ZSTD_compressCCtx, (void**)&sph_ZSTD_decompressDCtx, (void**)&sph_ZSTD_isError };
static CSphDynamicLibrary dLib ( ZSTD_LIB );
return dLib.LoadSymbols ( sFuncs, pFuncs, sizeof ( pFuncs ) / sizeof ( void** ) );
}
#else
#define sph_ZSTD_createCCtx ZSTD_createCCtx
#define sph_ZSTD_createDCtx ZSTD_createDCtx
#define sph_ZSTD_freeDCtx ZSTD_freeDCtx
#define sph_ZSTD_freeCCtx ZSTD_freeCCtx
#define sph_ZSTD_compressBound ZSTD_compressBound
#define sph_ZSTD_compressCCtx ZSTD_compressCCtx
#define sph_ZSTD_decompressDCtx ZSTD_decompressDCtx
#define sph_ZSTD_isError ZSTD_isError
#define InitDynamicZstd() ( true )
#endif
class ZstdCompressor
{
int m_iLevel =
#ifdef ZSTD_CLEVEL_DEFAULT
ZSTD_CLEVEL_DEFAULT
#else
3
#endif
;
ZSTD_CCtx *m_pCtxCompress = nullptr;
ZSTD_DCtx *m_pCtxDecompress = nullptr;
protected:
using csize_t = size_t;
ZstdCompressor()
{
m_pCtxCompress = sph_ZSTD_createCCtx();
m_pCtxDecompress = sph_ZSTD_createDCtx();
}
~ZstdCompressor()
{
sph_ZSTD_freeDCtx ( m_pCtxDecompress );
sph_ZSTD_freeCCtx ( m_pCtxCompress );
}
inline csize_t Common_compressBound ( csize_t uSize )
{
return (size_t)sph_ZSTD_compressBound ( uSize );
}
inline int Common_compress ( BYTE* pDest, csize_t* pDestLen, const BYTE* pSource, csize_t uSourceLen ) const
{
auto uSize = sph_ZSTD_compressCCtx ( m_pCtxCompress, pDest, *pDestLen, pSource, uSourceLen, m_iLevel );
*pDestLen = uSize;
return 0;
}
inline bool Common_uncompress ( BYTE* pDest, csize_t* pDestLen, const BYTE* pSource, csize_t uSourceLen )
{
auto iZResult = sph_ZSTD_decompressDCtx ( m_pCtxDecompress, pDest, *pDestLen, pSource, uSourceLen );
if ( sph_ZSTD_isError ( iZResult ) )
return false;
*pDestLen = iZResult;
return true;
}
public:
inline void SetLevel ( int iLevel )
{
m_iLevel = iLevel;
}
};
bool IsZstdCompressionAvailable()
{
static bool bZstdLoaded = false;
if ( !bZstdLoaded )
bZstdLoaded = InitDynamicZstd();
return bZstdLoaded;
}
void MakeZstdMysqlCompressedLayer ( std::unique_ptr<AsyncNetBuffer_c>& pSource, int iLevel )
{
auto pCompressed = std::make_unique<MysqlCompressedSocket_T<ZstdCompressor>> ( std::move ( pSource ) );
pCompressed->SetLevel ( iLevel );
pSource = std::move ( pCompressed );
}
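// Typical call-site sketch (illustrative; mirrors the handshake code in netreceive_ql.cpp):
//
//	if ( IsZstdCompressionAvailable() && tResponse.WantZstd() )
//		MakeZstdMysqlCompressedLayer ( pBuf, tResponse.WantZstdLev() );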
| 3,676 | C++ | .cpp | 99 | 35.272727 | 264 | 0.75267 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,842 | netreceive_ql.cpp | manticoresoftware_manticoresearch/src/netreceive_ql.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "netreceive_ql.h"
#include "coroutine.h"
#include "searchdssl.h"
#include "compressed_zlib_mysql.h"
#include "compressed_zstd_mysql.h"
#include "searchdbuddy.h"
extern int g_iClientQlTimeoutS; // sec
extern volatile bool g_bMaintenance;
extern CSphString g_sMySQLVersion;
constexpr bool bSendOkInsteadofEOF = true; // _if_ client support - send OK packet instead of EOF (in mysql proto).
namespace { // c++ way of 'static'
/// proto details are here: https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_packets.html
inline bool OmitEof() noexcept
{
return bSendOkInsteadofEOF && session::GetDeprecatedEOF();
}
/////////////////////////////////////////////////////////////////////////////
/// how many bytes this int will occupy in proto mysql
template<typename INT>
inline int SqlSizeOf ( INT _iLen ) noexcept
{
auto iLen = (uint64_t)_iLen;
if ( iLen < 251 )
return 1;
if ( iLen <= 0xffff )
return 3;
if ( iLen <= 0xffffff )
return 4;
return 9;
}
/////////////////////////////////////////////////////////////////////////////
/// encodes Mysql Length-coded binary
int MysqlPackInt ( BYTE* pOutput, int64_t iValue )
{
switch ( SqlSizeOf ( iValue ) )
{
case 1: *pOutput = (BYTE)iValue; return 1;
#if USE_LITTLE_ENDIAN
case 3:
*pOutput = (BYTE)'\xFC'; // 252
memcpy ( pOutput + 1, &iValue, 2 );
return 3;
case 4:
*pOutput = (BYTE)'\xFD'; // 253
memcpy ( pOutput + 1, &iValue, 3 );
return 4;
case 9:
default:
*pOutput = (BYTE)'\xFE'; // 254
memcpy ( pOutput + 1, &iValue, 8 );
return 9;
#else
case 3:
pOutput[0] = (BYTE)'\xFC'; // 252
pOutput[1] = (BYTE)iValue;
pOutput[2] = (BYTE)( iValue >> 8 );
return 3;
case 4:
pOutput[0] = (BYTE)'\xFD'; // 253
pOutput[1] = (BYTE)iValue;
pOutput[2] = (BYTE)( iValue >> 8 );
pOutput[3] = (BYTE)( iValue >> 16 );
return 4;
case 9:
default:
pOutput[0] = (BYTE)'\xFE'; // 254
pOutput[1] = (BYTE)iValue;
pOutput[2] = (BYTE)( iValue >> 8 );
pOutput[3] = (BYTE)( iValue >> 16 );
pOutput[4] = (BYTE)( iValue >> 24 );
pOutput[5] = (BYTE)( iValue >> 32 );
pOutput[6] = (BYTE)( iValue >> 40 );
pOutput[7] = (BYTE)( iValue >> 48 );
pOutput[8] = (BYTE)( iValue >> 56 );
return 9;
#endif
}
}
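// Sample encodings per the MySQL length-encoded integer rules (little-endian; these
// follow from the spec and the code above, shown here purely for reference):
//   MysqlPackInt ( p, 10 )    -> 0x0A                  (1 byte)
//   MysqlPackInt ( p, 1000 )  -> 0xFC 0xE8 0x03        (3 bytes)
//   MysqlPackInt ( p, 70000 ) -> 0xFD 0x70 0x11 0x01   (4 bytes)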
void MysqlSendInt ( ISphOutputBuffer& dOut, int64_t iValue )
{
	std::array<BYTE, 10> dBuf; // ok uninitialized
auto iLen = MysqlPackInt ( dBuf.data(), iValue );
dOut.SendBytes ( dBuf.data(), iLen );
}
int64_t MysqlReadPackedInt ( InputBuffer_c& tIn )
{
BYTE uVal = tIn.GetByte();
int64_t iRes = 0;
switch ( uVal )
{
case 0xFC:
{
iRes = tIn.GetByte();
iRes += tIn.GetByte() << 8;
return iRes;
}
case 0xFD:
{
iRes = tIn.GetByte();
iRes += tIn.GetByte() << 8;
iRes += tIn.GetByte() << 16;
return iRes;
}
case 0xFE:
{
iRes = tIn.GetByte();
iRes += tIn.GetByte() << 8;
iRes += tIn.GetByte() << 16;
iRes += tIn.GetByte() << 24;
iRes += int64_t ( tIn.GetByte() ) << 32;
iRes += int64_t ( tIn.GetByte() ) << 40;
iRes += int64_t ( tIn.GetByte() ) << 48;
iRes += int64_t ( tIn.GetByte() ) << 56;
return iRes;
}
default:
return int64_t (uVal);
}
}
CSphString MysqlReadSzStr ( InputBuffer_c& tIn )
{
Str_t sData = FromSz ( (const char*)tIn.GetBufferPtr() );
CSphString sResult ( sData );
tIn.SetBufferPos ( tIn.GetBufferPos() + sData.second + 1 ); // +1 to skip z-terminator
return sResult;
}
CSphString MysqlReadVlStr ( InputBuffer_c& tIn )
{
auto iLen = MysqlReadPackedInt ( tIn );
return tIn.GetRawString ( iLen );
}
// RAII MySQL packet block: the ctor reserves room for the size, the dtor writes the LSB size and packet ID
class SQLPacketHeader_c
{
ISphOutputBuffer& m_dOut;
BYTE m_uPacketID;
intptr_t m_iPos;
public:
explicit SQLPacketHeader_c ( ISphOutputBuffer& dOut, BYTE uPacketID = 0 )
: m_dOut ( dOut )
, m_uPacketID ( uPacketID )
, m_iPos { (intptr_t)m_dOut.GetSentCount() }
{
m_dOut.SendLSBDword ( 0 );
}
~SQLPacketHeader_c()
{
auto iBlobLen = m_dOut.GetSentCount() - m_iPos - sizeof ( int );
m_dOut.WriteLSBDword( m_iPos, ( m_uPacketID << 24 ) + iBlobLen );
}
};
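// Minimal usage sketch (illustrative): sending a 5-byte payload as one framed packet.
//
//	{
//		SQLPacketHeader_c tHdr { tOut, uPacketID++ }; // ctor reserves 4 header bytes
//		tOut.SendBytes ( pPayload, 5 );               // payload body
//	} // dtor patches the header in place: 05 00 00 <id> (3-byte LSB length + sequence id)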
//////////////////////////////////////////////////////////////////////////
// MYSQLD PRETENDER
//////////////////////////////////////////////////////////////////////////
struct MYSQL_FLAG
{
static constexpr WORD STATUS_IN_TRANS = 1; // mysql.h: SERVER_STATUS_IN_TRANS
static constexpr WORD STATUS_AUTOCOMMIT = 2; // mysql.h: SERVER_STATUS_AUTOCOMMIT
static constexpr WORD MORE_RESULTS = 8; // mysql.h: SERVER_MORE_RESULTS_EXISTS
};
constexpr int MAX_PACKET_LEN = 0x00FFFFFFL; // 16777215 bytes, max low level packet size. Notice, also used as mask.
struct MYSQL_CHARSET
{
static constexpr BYTE utf8_general_ci = 0x21;
// static constexpr BYTE binary = 0x3f;
};
// our copy of enum_field_types
// we can't rely on mysql_com.h because it might be unavailable
//
// MYSQL_TYPE_DECIMAL = 0
// MYSQL_TYPE_TINY = 1
// MYSQL_TYPE_SHORT = 2
// MYSQL_TYPE_LONG = 3
// MYSQL_TYPE_FLOAT = 4
// MYSQL_TYPE_DOUBLE = 5
// MYSQL_TYPE_NULL = 6
// MYSQL_TYPE_TIMESTAMP = 7
// MYSQL_TYPE_LONGLONG = 8
// MYSQL_TYPE_INT24 = 9
// MYSQL_TYPE_DATE = 10
// MYSQL_TYPE_TIME = 11
// MYSQL_TYPE_DATETIME = 12
// MYSQL_TYPE_YEAR = 13
// MYSQL_TYPE_NEWDATE = 14
// MYSQL_TYPE_VARCHAR = 15
// MYSQL_TYPE_BIT = 16
// MYSQL_TYPE_NEWDECIMAL = 246
// MYSQL_TYPE_ENUM = 247
// MYSQL_TYPE_SET = 248
// MYSQL_TYPE_TINY_BLOB = 249
// MYSQL_TYPE_MEDIUM_BLOB = 250
// MYSQL_TYPE_LONG_BLOB = 251
// MYSQL_TYPE_BLOB = 252
// MYSQL_TYPE_VAR_STRING = 253
// MYSQL_TYPE_STRING = 254
// MYSQL_TYPE_GEOMETRY = 255
struct MYSQL_ERROR
{
static constexpr int MAX_LENGTH = 512;
};
// our copy of enum_server_command
// we can't rely on mysql_com.h because it might be unavailable
//
// MYSQL_COM_SLEEP = 0
// MYSQL_COM_QUIT = 1
// MYSQL_COM_INIT_DB = 2
// MYSQL_COM_QUERY = 3
// MYSQL_COM_FIELD_LIST = 4
// MYSQL_COM_CREATE_DB = 5
// MYSQL_COM_DROP_DB = 6
// MYSQL_COM_REFRESH = 7
// MYSQL_COM_SHUTDOWN = 8
// MYSQL_COM_STATISTICS = 9
// MYSQL_COM_PROCESS_INFO = 10
// MYSQL_COM_CONNECT = 11
// MYSQL_COM_PROCESS_KILL = 12
// MYSQL_COM_DEBUG = 13
// MYSQL_COM_PING = 14
// MYSQL_COM_TIME = 15
// MYSQL_COM_DELAYED_INSERT = 16
// MYSQL_COM_CHANGE_USER = 17
// MYSQL_COM_BINLOG_DUMP = 18
// MYSQL_COM_TABLE_DUMP = 19
// MYSQL_COM_CONNECT_OUT = 20
// MYSQL_COM_REGISTER_SLAVE = 21
// MYSQL_COM_STMT_PREPARE = 22
// MYSQL_COM_STMT_EXECUTE = 23
// MYSQL_COM_STMT_SEND_LONG_DATA = 24
// MYSQL_COM_STMT_CLOSE = 25
// MYSQL_COM_STMT_RESET = 26
// MYSQL_COM_SET_OPTION = 27
// MYSQL_COM_STMT_FETCH = 28
enum
{
MYSQL_COM_QUIT = 1,
MYSQL_COM_INIT_DB = 2,
MYSQL_COM_QUERY = 3,
MYSQL_COM_STATISTICS = 9,
MYSQL_COM_PING = 14,
MYSQL_COM_SET_OPTION = 27
};
static void SendMysqlErrorPacket ( ISphOutputBuffer & tOut, BYTE uPacketID, Str_t sError, EMYSQL_ERR eErr )
{
if ( IsEmpty ( sError ) )
sError = FROMS("(null)");
	// truncate the error message: overly long messages break some popular clients
if ( sError.second>MYSQL_ERROR::MAX_LENGTH )
{
sError.second = MYSQL_ERROR::MAX_LENGTH;
char * sErr = const_cast<char *>( sError.first );
sErr[sError.second-3] = '.';
sErr[sError.second-2] = '.';
sErr[sError.second-1] = '.';
sErr[sError.second] = '\0';
}
auto uError = (WORD)eErr; // pretend to be mysql syntax error for now
// send packet header
SQLPacketHeader_c tHdr { tOut, uPacketID };
tOut.SendByte ( 0xff ); // field count, always 0xff for error packet
tOut.SendByte ( (BYTE)( uError & 0xff ) );
tOut.SendByte ( (BYTE)( uError>>8 ) );
// send sqlstate (1 byte marker, 5 byte state)
switch ( eErr )
{
case EMYSQL_ERR::SERVER_SHUTDOWN:
case EMYSQL_ERR::UNKNOWN_COM_ERROR:
tOut.SendBytes ( FROMS ( "#08S01" ) );
break;
case EMYSQL_ERR::NO_SUCH_TABLE:
tOut.SendBytes ( FROMS ( "#42S02" ) );
break;
case EMYSQL_ERR::NO_SUCH_THREAD:
tOut.SendBytes ( FROMS ( "#HY000" ) );
break;
default:
tOut.SendBytes ( FROMS ( "#42000" ) );
break;
}
// send error message
tOut.SendBytes ( sError );
}
inline WORD MysqlStatus ( bool bMoreResults, bool bAutoCommit, bool bIsInTrans )
{
WORD uStatus = 0;
if ( bMoreResults )
uStatus |= MYSQL_FLAG::MORE_RESULTS;
if ( bAutoCommit )
uStatus |= MYSQL_FLAG::STATUS_AUTOCOMMIT;
if ( bIsInTrans )
uStatus |= MYSQL_FLAG::STATUS_IN_TRANS;
return uStatus;
}
/// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_ok_packet.html
void SendMysqlOkPacketBody ( ISphOutputBuffer& tOut, int iAffectedRows, int iWarns, const char* szMessage, bool bMoreResults, bool bAutoCommit, bool bIsInTrans, int64_t iLastID )
{
MysqlSendInt ( tOut, iAffectedRows );
MysqlSendInt ( tOut, iLastID );
// order of WORDs is opposite to EOF packet below
tOut.SendLSBWord ( MysqlStatus ( bMoreResults, bAutoCommit, bIsInTrans ) );
	tOut.SendLSBWord ( iWarns < 0 ? 0 : ( iWarns > 65535 ? 65535 : iWarns ) ); // clamp into WORD range (65536 would wrap to 0)
if ( !szMessage )
return;
auto iLen = (int)strlen ( szMessage );
MysqlSendInt ( tOut, iLen );
tOut.SendBytes ( szMessage, iLen );
}
void SendMysqlOkPacket ( ISphOutputBuffer& tOut, BYTE uPacketID, int iAffectedRows, int iWarns, const char* szMessage, bool bMoreResults, bool bAutoCommit, bool bIsInTrans, int64_t iLastID )
{
SQLPacketHeader_c tHdr { tOut, uPacketID };
tOut.SendByte ( 0 ); // ok packet
SendMysqlOkPacketBody ( tOut, iAffectedRows, iWarns, szMessage, bMoreResults, bAutoCommit, bIsInTrans, iLastID );
}
void SendMysqlOkPacket ( ISphOutputBuffer& tOut, BYTE uPacketID, bool bAutoCommit, bool bIsInTrans )
{
SendMysqlOkPacket ( tOut, uPacketID, 0, 0, nullptr, false, bAutoCommit, bIsInTrans, 0 );
}
/// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_basic_eof_packet.html
void SendMysqlEofPacket ( ISphOutputBuffer & tOut, BYTE uPacketID, int iWarns, bool bMoreResults, bool bAutoCommit, bool bIsInTrans, const char* szMeta = nullptr )
{
SQLPacketHeader_c tHdr { tOut, uPacketID };
tOut.SendByte ( 0xfe );
if ( OmitEof() )
return SendMysqlOkPacketBody ( tOut, 0, iWarns, szMeta, bMoreResults, bAutoCommit, bIsInTrans, 0 );
	tOut.SendLSBWord ( iWarns < 0 ? 0 : ( iWarns > 65535 ? 65535 : iWarns ) ); // clamp into WORD range (65536 would wrap to 0)
tOut.SendLSBWord ( MysqlStatus ( bMoreResults, bAutoCommit, bIsInTrans ) );
}
//////////////////////////////////////////////////////////////////////////
// Mysql row buffer and command handler
class SqlRowBuffer_c final : public RowBuffer_i, private LazyVector_T<BYTE>
{
BYTE & m_uPacketID;
GenericOutputBuffer_c & m_tOut;
ClientSession_c* m_pSession = nullptr;
size_t m_iTotalSent = 0;
bool m_bWasFlushed = false;
CSphVector<std::pair<CSphString, MysqlColumnType_e>> m_dHead;
// how many bytes this string will occupy in proto mysql
static int SqlStrlen ( const char * sStr )
{
auto iLen = ( int ) strlen ( sStr );
return SqlSizeOf ( iLen ) + iLen;
}
void SendSqlInt ( int iVal )
{
MysqlSendInt ( m_tOut, iVal );
}
void SendSqlString ( const char * sStr )
{
auto iLen = (int) strlen ( sStr );
SendSqlInt ( iLen );
m_tOut.SendBytes ( sStr, iLen );
}
bool SomethingWasSent() final {
auto iPrevSent = std::exchange ( m_iTotalSent, m_tOut.GetTotalSent() + m_tOut.GetSentCount() + GetLength() );
return iPrevSent != m_iTotalSent;
}
void SendSqlFieldPacket ( const char * sCol, MysqlColumnType_e eType, WORD uFlags=0 )
{
const char * sDB = "";
const char * sTable = "";
int iColLen = 0;
switch ( eType )
{
case MYSQL_COL_LONG: iColLen = 11;
break;
case MYSQL_COL_DECIMAL:
case MYSQL_COL_FLOAT:
case MYSQL_COL_DOUBLE:
case MYSQL_COL_UINT64:
case MYSQL_COL_LONGLONG: iColLen = 20;
break;
case MYSQL_COL_STRING: iColLen = 255;
break;
}
SQLPacketHeader_c dBlob { m_tOut, m_uPacketID++ };
SendSqlString ( "def" ); // catalog
SendSqlString ( sDB ); // db
SendSqlString ( sTable ); // table
SendSqlString ( sTable ); // org_table
SendSqlString ( sCol ); // name
SendSqlString ( sCol ); // org_name
m_tOut.SendByte ( 12 ); // filler, must be 12 (following pseudo-string length)
		m_tOut.SendByte ( 0x21 ); // charset_nr low byte, 0x21 is utf8
		m_tOut.SendByte ( 0 ); // charset_nr high byte
m_tOut.SendLSBDword ( iColLen ); // length
m_tOut.SendByte ( BYTE ( eType ) ); // type (0=decimal)
m_tOut.SendByte ( uFlags & 255 );
m_tOut.SendByte ( uFlags >> 8 );
m_tOut.SendByte ( 0 ); // decimals
m_tOut.SendWord ( 0 ); // filler
}
bool IsAutoCommit() const
{
return !m_pSession || session::IsAutoCommit ( m_pSession );
}
bool IsInTrans () const
{
return m_pSession != nullptr && session::IsInTrans ( m_pSession );
}
public:
SqlRowBuffer_c ( BYTE * pPacketID, GenericOutputBuffer_c * pOut )
: m_uPacketID ( *pPacketID )
, m_tOut ( *pOut )
, m_pSession ( session::GetClientSession() )
{}
void PutFloatAsString ( float fVal, const char * sFormat ) override
{
ReserveGap ( SPH_MAX_NUMERIC_STR );
auto pSize = End();
int iLen = sFormat
? snprintf (( char* ) pSize + 1, SPH_MAX_NUMERIC_STR - 1, sFormat, fVal )
: sph::PrintVarFloat (( char* ) pSize + 1, SPH_MAX_NUMERIC_STR - 1, fVal );
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
void PutDoubleAsString ( double fVal, const char * szFormat ) override
{
ReserveGap ( SPH_MAX_NUMERIC_STR );
auto pSize = End();
int iLen = szFormat
? snprintf (( char* ) pSize + 1, SPH_MAX_NUMERIC_STR - 1, szFormat, fVal )
: sph::PrintVarDouble (( char* ) pSize + 1, SPH_MAX_NUMERIC_STR - 1, fVal );
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
void PutNumAsString ( int64_t iVal ) override
{
ReserveGap ( SPH_MAX_NUMERIC_STR );
auto pSize = End();
#if __has_include( <charconv>)
int iLen = std::to_chars ( (char*)pSize + 1, (char*)pSize + SPH_MAX_NUMERIC_STR, iVal ).ptr - (char*)( pSize + 1 );
#else
int iLen = sph::NtoA ( ( char * ) pSize + 1, iVal );
#endif
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
void PutNumAsString ( uint64_t uVal ) override
{
ReserveGap ( SPH_MAX_NUMERIC_STR );
auto pSize = End();
#if __has_include( <charconv>)
int iLen = std::to_chars ( (char*)pSize + 1, (char*)pSize + SPH_MAX_NUMERIC_STR, uVal ).ptr - (char*)( pSize + 1 );
#else
int iLen = sph::NtoA ( (char*)pSize + 1, uVal );
#endif
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
void PutNumAsString ( int iVal ) override
{
ReserveGap ( SPH_MAX_NUMERIC_STR );
auto pSize = End();
#if __has_include( <charconv>)
int iLen = std::to_chars ( (char*)pSize + 1, (char*)pSize + SPH_MAX_NUMERIC_STR, iVal ).ptr - (char*)( pSize + 1 );
#else
int iLen = sph::NtoA ( ( char * ) pSize + 1, iVal );
#endif
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
void PutNumAsString ( DWORD uVal ) override
{
ReserveGap ( SPH_MAX_NUMERIC_STR );
auto pSize = End();
#if __has_include( <charconv>)
int iLen = std::to_chars ( (char*)pSize + 1, (char*)pSize + SPH_MAX_NUMERIC_STR, uVal ).ptr - (char*)( pSize + 1 );
#else
int iLen = sph::NtoA ( ( char * ) pSize + 1, uVal );
#endif
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
// pack raw array (i.e. packed length, then blob) into proto mysql
void PutArray ( const ByteBlob_t& dBlob, bool bSendEmpty ) override
{
if ( !IsValid ( dBlob ) )
return;
if ( ::IsEmpty ( dBlob ) && bSendEmpty )
{
PutNULL();
return;
}
		auto pSpace = AddN ( dBlob.second + 9 ); // 9 is taken from the MysqlPackInt() implementation (max possible offset)
auto iNumLen = MysqlPackInt ( pSpace, dBlob.second );
if ( dBlob.second )
memcpy ( pSpace+iNumLen, dBlob.first, dBlob.second );
Resize ( Idx ( pSpace ) + iNumLen + dBlob.second );
}
// pack string (or "")
void PutString ( Str_t sMsg ) override
{
PutArray ( S2B ( sMsg ), false );
}
void PutMicrosec ( int64_t iUsec ) override
{
iUsec = Max ( iUsec, 0 );
ReserveGap ( SPH_MAX_NUMERIC_STR+1 );
auto pSize = (char*) End();
int iLen = sph::IFtoA ( pSize + 1, iUsec, 6 );
*pSize = BYTE ( iLen );
AddN ( iLen + 1 );
}
void PutNULL () override
{
Add ( 0xfb ); // MySQL NULL is 0xfb at VLB length
}
public:
	/// higher level: processing whole tables.
// sends collected data, then reset
bool Commit() override
{
if ( m_bError )
return false;
int iLeft = GetLength();
const BYTE * pBuf = Begin();
while ( iLeft )
{
int iSize = Min ( iLeft, MAX_PACKET_LEN );
{ // scope to write header BEFORE possible flush below
SQLPacketHeader_c dBlob { m_tOut, m_uPacketID++ };
m_tOut.SendBytes ( pBuf, iSize );
}
pBuf += iSize;
iLeft -= iSize;
if ( m_tOut.GetSentCount() > MAX_PACKET_LEN )
{
if ( !m_tOut.Flush() )
{
m_bError = true;
return false;
}
m_bWasFlushed = true;
}
}
Resize(0);
return true;
}
// wrappers for popular packets
void Eof ( bool bMoreResults, int iWarns, const char* szMeta ) override
{
SendMysqlEofPacket ( m_tOut, m_uPacketID++, iWarns, bMoreResults, IsAutoCommit(), IsInTrans(), szMeta );
}
using RowBuffer_i::Eof;
void Error ( const char * sError, EMYSQL_ERR eErr ) override
{
m_bError = true;
m_sError = sError;
SendMysqlErrorPacket ( m_tOut, m_uPacketID, FromSz(sError), eErr );
}
void Ok ( int iAffectedRows, int iWarns, const char * sMessage, bool bMoreResults, int64_t iLastInsertId ) override
{
SendMysqlOkPacket ( m_tOut, m_uPacketID, iAffectedRows, iWarns, sMessage, bMoreResults, IsAutoCommit(), IsInTrans(), iLastInsertId );
if ( bMoreResults )
m_uPacketID++;
}
// Header of the table with defined num of columns
inline void HeadBegin ( ) override
{
m_dHead.Reset();
}
bool HeadEnd ( bool bMoreResults, int iWarns ) override
{
{
SQLPacketHeader_c dHead { m_tOut, m_uPacketID++ };
SendSqlInt ( m_dHead.GetLength() );
}
for ( const auto& dCol : m_dHead )
SendSqlFieldPacket ( dCol.first.cstr(), dCol.second );
if ( !OmitEof() )
Eof ( bMoreResults, iWarns );
Resize(0);
m_dHead.Reset();
return true;
}
	// add the next column. The EOF after the full set will be fired automatically
void HeadColumn ( const char * sName, MysqlColumnType_e uType ) override
{
m_dHead.Add ( { sName, uType } );
}
void Add ( BYTE uVal ) override
{
LazyVector_T<BYTE>::Add ( uVal );
}
[[nodiscard]] bool WasFlushed() const noexcept { return m_bWasFlushed; }
[[nodiscard]] std::pair<int, BYTE> GetCurrentPositionState() noexcept
{
// we track flushes just for current position (that is - flushing invalidates position)
m_bWasFlushed = false;
return { m_tOut.GetSentCount(), m_uPacketID };
};
void ResetToPositionState ( std::pair<int, BYTE> tPoint )
{
assert ( !m_bWasFlushed && "Can't rewind already flushed stream!");
// reset to initial state (as after ctr)
RowBuffer_i::Reset();
LazyVector_T<BYTE>::Reset();
m_pSession = session::GetClientSession();
m_iTotalSent = 0;
m_bWasFlushed = false;
// rewind stream and packetID
assert ( !m_bError );
m_tOut.Rewind ( tPoint.first );
m_uPacketID = tPoint.second;
}
};
struct CLIENT
{
// see https://dev.mysql.com/doc/dev/mysql-server/latest/group__group__cs__capabilities__flags.html for reference
	// we use the same inconsistent definitions to match the reference (i.e. some constants are defined as decimals, some as (1UL << X)); kept as-is for easier matching against the reference page.
static constexpr DWORD CONNECT_WITH_DB = 8;
static constexpr DWORD COMPRESS = 32;
static constexpr DWORD PROTOCOL_41 = 512;
static constexpr DWORD SSL = 2048;
// static constexpr DWORD RESERVED = 16384; // DEPRECATED: Old flag for 4.1 protocol
static constexpr DWORD RESERVED2 = 32768; // DEPRECATED: Old flag for 4.1 authentication \ CLIENT_SECURE_CONNECTION.
static constexpr DWORD MULTI_RESULTS = ( 1UL << 17 );
// static constexpr DWORD PS_MULTI_RESULTS = ( 1UL << 18 );
static constexpr DWORD PLUGIN_AUTH = ( 1UL << 19 );
static constexpr DWORD CONNECT_ATTRS = ( 1UL << 20 );
static constexpr DWORD PLUGIN_AUTH_LENENC_CLIENT_DATA = ( 1UL << 21 );
static constexpr DWORD DEPRECATE_EOF = ( 1UL << 24 );
static constexpr DWORD ZSTD_COMPRESSION_ALGORITHM = ( 1UL << 26 );
};
// handshake package we send to client
class HandshakeV10_c
{
static constexpr BYTE AUTH_DATA_LEN = 21;
const BYTE m_uVersion = 0x0A; // protocol version 10
const BYTE m_uCharSet = MYSQL_CHARSET::utf8_general_ci;
const WORD m_uServerStatusFlag = MYSQL_FLAG::STATUS_AUTOCOMMIT;
const Str_t m_sAuthPluginName { "mysql_native_password", 22 };
Str_t m_sVersionString;
DWORD m_uConnID;
std::array<char, AUTH_DATA_LEN> m_sAuthData {};
DWORD m_uCapabilities = CLIENT::CONNECT_WITH_DB
| CLIENT::PROTOCOL_41
| CLIENT::RESERVED2 // deprecated
// | CLIENT::RESERVED
| CLIENT::MULTI_RESULTS
| CLIENT::PLUGIN_AUTH
| CLIENT::CONNECT_ATTRS
| ( bSendOkInsteadofEOF ? CLIENT::DEPRECATE_EOF : 0 );
public:
explicit HandshakeV10_c( DWORD uConnID )
: m_uConnID ( uConnID )
{
static bool bExtraCapabilitiesSet = false;
static WORD uExtraCapabilities = 0;
if ( !bExtraCapabilitiesSet )
{
uExtraCapabilities = dwval_from_env ( "MANTICORE_MYSQL_EXTRA_CAPABILITIES", 0 );
bExtraCapabilitiesSet = true;
}
m_uCapabilities |= uExtraCapabilities;
// fill scramble auth data (random)
DWORD i = 0;
DWORD uRand = sphRand() | 0x01010101;
for ( ; i < AUTH_DATA_LEN - sizeof ( DWORD ); i += sizeof ( DWORD ) )
{
memcpy ( m_sAuthData.data() + i, &uRand, sizeof ( DWORD ) );
uRand = sphRand() | 0x01010101;
}
if ( i < AUTH_DATA_LEN )
memcpy ( m_sAuthData.data() + i, &uRand, AUTH_DATA_LEN - i );
memset ( m_sAuthData.data() + AUTH_DATA_LEN - 1, 0, 1);
// version string (plus 0-terminator)
m_sVersionString = FromStr ( g_sMySQLVersion );
		++m_sVersionString.second; // count the z-terminator in as well
}
void SetCanSsl ( bool bCan )
{
if ( bCan )
m_uCapabilities |= CLIENT::SSL;
}
void SetCanZlib( bool bCan )
{
if ( bCan )
m_uCapabilities |= CLIENT::COMPRESS;
}
void SetCanZstd ( bool bCan )
{
if ( bCan )
m_uCapabilities |= CLIENT::ZSTD_COMPRESSION_ALGORITHM;
}
void Send ( ISphOutputBuffer& tOut )
{
// see https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_handshake_v10.html for reference
constexpr int iFillerSize = 10;
const std::array<BYTE, iFillerSize> dFiller { 0 };
sphLogDebugv ( "Sending handshake..." );
SQLPacketHeader_c tHeader { tOut };
// Protocol::HandshakeV10
tOut.SendByte ( m_uVersion );
tOut.SendBytes ( m_sVersionString );
tOut.SendLSBDword ( m_uConnID );
tOut.SendBytes ( m_sAuthData.data(), 8 );
tOut.SendByte ( 0 );
tOut.SendLSBWord ( m_uCapabilities & 0xFFFF );
tOut.SendByte ( m_uCharSet );
tOut.SendLSBWord ( m_uServerStatusFlag );
tOut.SendLSBWord ( m_uCapabilities >> 16 );
tOut.SendByte ( AUTH_DATA_LEN );
tOut.SendBytes ( dFiller.data(), iFillerSize );
tOut.SendBytes ( &m_sAuthData[8], AUTH_DATA_LEN - 8 );
tOut.SendBytes ( m_sAuthPluginName );
}
};
// HandshakeResponse truncated right before username field
class HandshakeResponse41
{
CSphString m_sLoginUserName;
CSphString m_sAuthResponse;
CSphString m_sDatabase;
CSphString m_sClientPluginName;
SmallStringHash_T<CSphString> m_hAttributes;
DWORD m_uCapabilities;
DWORD m_uMaxPacketSize;
BYTE m_uCharset;
BYTE m_uCompressionLevel = 0;
public:
// see https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_connection_phase_packets_protocol_handshake_response.html for ref
explicit HandshakeResponse41 ( AsyncNetInputBuffer_c& tRawIn, int iPacketLen )
{
InputBuffer_c tIn { tRawIn.PopTail ( iPacketLen ) };
m_uCapabilities = tIn.GetLSBDword();
assert ( m_uCapabilities & CLIENT::PROTOCOL_41 );
m_uMaxPacketSize = tIn.GetLSBDword();
m_uCharset = tIn.GetByte();
tIn.SetBufferPos ( tIn.GetBufferPos() + 23 );
sphLogDebugv ( "HandshakeResponse41. PackedLen=%d, hasBytes=%d", iPacketLen, tIn.HasBytes() );
// ssl auth is finished here
if ( tIn.HasBytes() <=0 )
return;
// login name
m_sLoginUserName = MysqlReadSzStr ( tIn );
sphLogDebugv ( "User: %s", m_sLoginUserName.cstr() );
// auth
if ( m_uCapabilities & CLIENT::PLUGIN_AUTH_LENENC_CLIENT_DATA )
m_sAuthResponse = MysqlReadVlStr ( tIn );
else
{
auto uLen = tIn.GetByte();
m_sAuthResponse = tIn.GetRawString ( uLen );
}
// db name
if ( m_uCapabilities & CLIENT::CONNECT_WITH_DB )
{
m_sDatabase = MysqlReadSzStr ( tIn );
sphLogDebugv ( "DB: %s", m_sDatabase.cstr() );
}
		// client plugin name
if ( m_uCapabilities & CLIENT::PLUGIN_AUTH )
m_sClientPluginName = MysqlReadSzStr ( tIn );
// attributes
if ( m_uCapabilities & CLIENT::CONNECT_ATTRS )
{
auto iWatermark = MysqlReadPackedInt ( tIn );
sphLogDebugv ( "%d bytes of attrs", (int) iWatermark );
iWatermark = tIn.HasBytes() - iWatermark;
while ( iWatermark < tIn.HasBytes() )
{
auto sKey = MysqlReadVlStr ( tIn );
auto sVal = MysqlReadVlStr ( tIn );
sphLogDebugv ( "%s: %s", sKey.cstr(), sVal.cstr() );
m_hAttributes.Add ( std::move ( sVal ), sKey );
}
}
// compression level
if ( tIn.HasBytes()>0 )
m_uCompressionLevel = tIn.GetByte();
}
[[nodiscard]] inline const CSphString& GetUsername() const noexcept
{
return m_sLoginUserName;
}
[[nodiscard]] inline const CSphString& GetDB() const noexcept
{
return m_sDatabase;
}
[[nodiscard]] inline bool WantSSL() const noexcept
{
return ( m_uCapabilities & CLIENT::SSL ) != 0;
}
[[nodiscard]] inline bool WantZlib() const noexcept
{
return ( m_uCapabilities & CLIENT::COMPRESS ) != 0;
}
[[nodiscard]] inline bool WantZstd() const noexcept
{
return ( m_uCapabilities & CLIENT::ZSTD_COMPRESSION_ALGORITHM ) != 0;
}
[[nodiscard]] inline int WantZstdLev() const noexcept
{
return m_uCompressionLevel;
}
[[nodiscard]] inline bool DeprecateEOF() const noexcept
{
return ( m_uCapabilities & CLIENT::DEPRECATE_EOF ) != 0;
}
};
static bool LoopClientMySQL ( BYTE & uPacketID, int iPacketLen, QueryProfile_c * pProfile, AsyncNetBuffer_c * pBuf )
{
auto& tSess = session::Info();
assert ( pBuf );
auto& tIn = *(AsyncNetInputBuffer_c *) pBuf;
auto& tOut = *(GenericOutputBuffer_c *) pBuf;
auto uHasBytesIn = tIn.HasBytes ();
// get command, handle special packets
const BYTE uMysqlCmd = tIn.GetByte ();
if ( uMysqlCmd!=MYSQL_COM_QUERY )
sphLogDebugv ( "LoopClientMySQL command %d", uMysqlCmd );
if ( uMysqlCmd==MYSQL_COM_QUIT )
return false;
bool bKeepProfile = true;
switch ( uMysqlCmd )
{
case MYSQL_COM_PING:
case MYSQL_COM_INIT_DB:
// client wants a pong
SendMysqlOkPacket ( tOut, uPacketID, session::IsAutoCommit(), session::IsInTrans() );
break;
case MYSQL_COM_SET_OPTION:
// bMulti = ( tIn.GetWord()==MYSQL_OPTION_MULTI_STATEMENTS_ON ); // that's how we could double check and validate multi query
// server reporting success in response to COM_SET_OPTION and COM_DEBUG
SendMysqlEofPacket ( tOut, uPacketID, 0, false, session::IsAutoCommit (), session::IsInTrans() );
break;
case MYSQL_COM_STATISTICS:
{
StringBuilder_c sStats;
BuildStatusOneline ( sStats );
SQLPacketHeader_c dBlob { tOut, uPacketID };
tOut.SendBytes ( sStats );
break;
}
case MYSQL_COM_QUERY:
{
// handle query packet
Str_t tSrcQueryReference ( nullptr, iPacketLen-1 );
tIn.GetBytesZerocopy ( ( const BYTE ** )( &tSrcQueryReference.first ), tSrcQueryReference.second );
			// the string created from the tSrcQueryReference data is moved into myinfo, since the referenced bytes could be changed during query parsing
			myinfo::SetDescription ( CSphString ( tSrcQueryReference ), tSrcQueryReference.second ); // OPTIMIZE? could be huge, but string is hazard.
AT_SCOPE_EXIT ( []() { myinfo::SetDescription ( {}, 0 ); } );
assert ( !tIn.GetError() );
sphLogDebugv ( "LoopClientMySQL command %d, '%s'", uMysqlCmd, myinfo::UnsafeDescription().first );
tSess.SetTaskState ( TaskState_e::QUERY );
SqlRowBuffer_c tRows ( &uPacketID, &tOut );
tSess.m_pSqlRowBuffer = &tRows;
auto tStoredPos = tRows.GetCurrentPositionState();
bKeepProfile = session::Execute ( myinfo::UnsafeDescription(), tRows );
if ( tRows.IsError() )
{
if ( !HasBuddy() || tRows.WasFlushed() )
{
LogSphinxqlError ( myinfo::UnsafeDescription().first, FromStr ( tRows.GetError() ) );
if ( tRows.WasFlushed() )
sphLogDebug ( "Can't invoke buddy, because output socket was flushed; unable to rewind/overwrite anything" );
} else
{
ProcessSqlQueryBuddy ( tSrcQueryReference, FromStr ( tRows.GetError() ), tStoredPos, uPacketID, tOut );
}
}
}
break;
default:
// default case, unknown command
StringBuilder_c sError;
sError << "unknown command (code=" << uMysqlCmd << ")";
LogSphinxqlError ( "", Str_t ( sError ) );
SendMysqlErrorPacket ( tOut, uPacketID, Str_t(sError), EMYSQL_ERR::UNKNOWN_COM_ERROR );
break;
}
auto uBytesConsumed = uHasBytesIn - tIn.HasBytes ();
if ( uBytesConsumed<iPacketLen )
{
uBytesConsumed = iPacketLen - uBytesConsumed;
sphLogDebugv ( "LoopClientMySQL disposing unused %d bytes", uBytesConsumed );
const BYTE* pFoo = nullptr;
tIn.GetBytesZerocopy (&pFoo, uBytesConsumed);
}
// send the response packet
tSess.SetTaskState ( TaskState_e::NET_WRITE );
if ( !tOut.Flush () )
return false;
// finalize query profile
if ( pProfile )
pProfile->Stop();
if ( uMysqlCmd==MYSQL_COM_QUERY && bKeepProfile )
session::SaveLastProfile();
tOut.SetProfiler ( nullptr );
return true;
}
} // static namespace
// that is used from sphinxql command over API
void RunSingleSphinxqlCommand ( Str_t sCommand, GenericOutputBuffer_c & tOut )
{
BYTE uDummy = 0;
SqlRowBuffer_c tRows ( &uDummy, &tOut );
session::Execute ( sCommand, tRows );
}
// add 'compressed' flag
struct QlCompressedInfo_t : public TaskInfo_t
{
DECLARE_RENDER( QlCompressedInfo_t );
bool m_bCompressed = false;
};
DEFINE_RENDER( QlCompressedInfo_t )
{
auto & tInfo = *(QlCompressedInfo_t *)const_cast<void*>(pSrc);
if ( tInfo.m_bCompressed )
{
dDst.m_sProto << "compressed";
dDst.m_sChain << "gzip ";
}
}
// main sphinxql server
void SqlServe ( std::unique_ptr<AsyncNetBuffer_c> pBuf )
{
auto& tSess = session::Info();
// to display 'compressed' flag, if any.
auto pCompressedFlag = PublishTaskInfo ( new QlCompressedInfo_t );
	// non-vip connections in maintenance should already be rejected on accept
assert ( !g_bMaintenance || tSess.GetVip() );
	// set up the query guard
GlobalCrashQueryGetRef ().m_eType = QUERY_SQL;
const bool bCanZlibCompression = IsZlibCompressionAvailable();
const bool bCanZstdCompression = IsZstdCompressionAvailable();
int iCID = tSess.GetConnID();
const char * sClientIP = tSess.szClientName();
GenericOutputBuffer_c* pOut = pBuf.get();
AsyncNetInputBuffer_c* pIn = pBuf.get();
	/// mysql is pro-active: we NEED to send the handshake before the client sends us anything.
	/// So, no passive probing is possible.
	// send handshake first
tSess.SetTaskState ( TaskState_e::HANDSHAKE );
HandshakeV10_c tHandshake ( iCID );
tHandshake.SetCanSsl ( CheckWeCanUseSSL() ); // fixme! SSL capability must be set only if keys are valid!
tHandshake.SetCanZlib( bCanZlibCompression );
tHandshake.SetCanZstd( bCanZstdCompression );
tHandshake.Send ( *pOut );
tSess.SetTaskState ( TaskState_e::NET_WRITE );
if ( !pOut->Flush () )
{
int iErrno = sphSockGetErrno ();
sphWarning ( "failed to send server version (client=%s(%d), error: %d '%s')",
sClientIP, iCID, iErrno, sphSockError ( iErrno ) );
return;
}
CSphString sError;
bool bAuthed = false;
BYTE uPacketID = 1;
int iPacketLen;
int iTimeoutS = -1;
int iWTimeoutS = -1;
do
{
tSess.SetKilled ( false );
// check for updated timeout
auto iCurrentTimeout = tSess.GetTimeoutS(); // by default -1, means 'default'
if ( iCurrentTimeout<0 )
iCurrentTimeout = g_iClientQlTimeoutS;
if ( iCurrentTimeout!=iTimeoutS )
{
iTimeoutS = iCurrentTimeout;
pIn->SetTimeoutUS ( S2US * iTimeoutS );
}
iCurrentTimeout = tSess.GetWTimeoutS(); // by default -1, means 'default'
if ( iCurrentTimeout < 0 )
iCurrentTimeout = g_iClientQlTimeoutS;
if ( iCurrentTimeout != iWTimeoutS )
{
iWTimeoutS = iCurrentTimeout;
pOut->SetWTimeoutUS( S2US * iWTimeoutS );
}
pIn->DiscardProcessed ();
iPacketLen = 0;
// get next packet
// we want interruptible calls here, so that shutdowns could be honored
sphLogDebugv ( "Receiving command... %d bytes in buf", pIn->HasBytes() );
// setup per-query profiling
auto pProfile = session::StartProfiling ( SPH_QSTATE_TOTAL );
if ( pProfile )
pOut->SetProfiler ( pProfile );
int iChunkLen = MAX_PACKET_LEN;
auto iStartPacketPos = pIn->GetBufferPos ();
while (iChunkLen==MAX_PACKET_LEN)
{
// inlined AsyncReadMySQLPacketHeader
if ( !pIn->ReadFrom ( iPacketLen+4 ))
{
				// if there was an eof, we're done
				// (comment from SyncSockRead:
				// while we wait for the start of the packet it is ok to quit, but the right way is to send MYSQL_COM_QUERY)
bool bNotError = ( !iPacketLen );
sError.SetSprintf ( "bailing on failed MySQL header, %s", ( pIn->GetError() ? pIn->GetErrorMessage().cstr() : sphSockError() ) );
// still want to log this even into logdebugv along with all other net events
LogNetError ( sError.cstr(), bNotError );
if ( !bNotError )
{
SendMysqlErrorPacket ( *pOut, uPacketID, FromStr ( sError ), EMYSQL_ERR::UNKNOWN_COM_ERROR );
pOut->Flush ();
}
return;
}
pIn->SetBufferPos ( iStartPacketPos + iPacketLen ); // will read at the end of the buffer
DWORD uAddon = pIn->GetLSBDword ();
pIn->DiscardProcessed ( sizeof ( uAddon )); // move out this header to keep rest of the buff solid
pIn->SetBufferPos ( iStartPacketPos ); // rewind back after the read.
uPacketID = 1+(BYTE) ( uAddon >> 24 );
iChunkLen = ( uAddon & MAX_PACKET_LEN );
sphLogDebugv ( "AsyncReadMySQLPacketHeader returned %d len...", iChunkLen );
iPacketLen += iChunkLen;
if ( !bAuthed && ( uAddon == SPHINX_CLIENT_VERSION || uAddon == 0x01000000UL ) )
{
sphLogDebug ( "conn %d from %s: seems, that non-mysql proto (sphinx?) packet received (%x). M.b. you've confused remote port in distributed table definition?", iCID, sClientIP, uAddon );
return;
}
// receive package body
if ( !pIn->ReadFrom ( iPacketLen ))
{
sError.SetSprintf ( "failed to receive MySQL request body, expected length %d, %s", iPacketLen, ( pIn->GetError() ? pIn->GetErrorMessage().cstr() : sphSockError() ) );
LogNetError ( sError.cstr() );
SendMysqlErrorPacket ( *pOut, uPacketID, FromStr ( sError ), EMYSQL_ERR::UNKNOWN_COM_ERROR );
pOut->Flush ();
return;
}
}
SwitchProfile ( pProfile, SPH_QSTATE_UNKNOWN );
// handle auth packet
if ( !bAuthed )
{
tSess.SetTaskState ( TaskState_e::HANDSHAKE );
HandshakeResponse41 tResponse ( *pIn, iPacketLen );
			// switch to ssl on demand.
			// We need to set a bit in the handshake (g_sMysqlHandshake) in order to suggest such switching to the client.
			// The client sets this desirable bit back only if we announced the capability first.
if ( !tSess.GetSsl() && tResponse.WantSSL() ) // want SSL
{
tSess.SetSsl ( MakeSecureLayer ( pBuf ) );
pOut = pBuf.get();
pIn = pBuf.get();
tSess.SetPersistent( !pOut->GetError () );
continue; // next packet will be 'login' again, but received over SSL
}
if ( IsMaxedOut() )
{
LogNetError ( g_sMaxedOutMessage.first );
SendMysqlErrorPacket ( *pOut, uPacketID, g_sMaxedOutMessage, EMYSQL_ERR::UNKNOWN_COM_ERROR );
pOut->Flush ();
gStats().m_iMaxedOut.fetch_add ( 1, std::memory_order_relaxed );
break;
}
if ( tResponse.GetUsername() == "FEDERATED" )
session::SetFederatedUser();
session::SetUser ( tResponse.GetUsername() );
SendMysqlOkPacket ( *pOut, uPacketID, session::IsAutoCommit(), session::IsInTrans ());
tSess.SetPersistent ( pOut->Flush () );
bAuthed = true;
session::SetDeprecatedEOF ( tResponse.DeprecateEOF() );
if ( bCanZstdCompression && tResponse.WantZstd() )
{
MakeZstdMysqlCompressedLayer ( pBuf, tResponse.WantZstdLev() );
pOut = pBuf.get();
pIn = pBuf.get();
pCompressedFlag->m_bCompressed = true;
}
else if ( bCanZlibCompression && tResponse.WantZlib() )
{
MakeZlibMysqlCompressedLayer ( pBuf );
pOut = pBuf.get();
pIn = pBuf.get();
pCompressedFlag->m_bCompressed = true;
}
continue;
}
tSess.SetPersistent ( LoopClientMySQL ( uPacketID, iPacketLen, pProfile, pBuf.get() ) );
pBuf->SyncErrorState();
if ( pIn->GetError() )
LogNetError ( pIn->GetErrorMessage().cstr() );
pBuf->ResetError();
} while ( tSess.GetPersistent() );
}
RowBuffer_i * CreateSqlRowBuffer ( BYTE * pPacketID, GenericOutputBuffer_c * pOut )
{
return new SqlRowBuffer_c ( pPacketID, pOut );
}
| 37,190 | C++ | .cpp | 1,097 | 31.161349 | 190 | 0.684256 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,843 | jsonqueryfilter.cpp | manticoresoftware_manticoresearch/src/jsonqueryfilter.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "jsonqueryfilter.h"
#include "sphinxint.h"
#include "geodist.h"
#include <time.h>
#include "datetime.h"
static const char * g_szFilter = "_@filter_";
static bool ParseLatLon ( const JsonObj_c & tLat, const JsonObj_c & tLon, LocationField_t * pField, LocationSource_t * pSource, CSphString & sError )
{
if ( !tLat || !tLon )
{
if ( !tLat && !tLon )
sError = R"("lat" and "lon" properties missing)";
else
sError.SetSprintf ( R"("%s" property missing)", ( !tLat ? "lat" : "lon" ) );
return false;
}
bool bParseField = !!pField;
bool bLatChecked = bParseField ? tLat.IsNum() : tLat.IsStr();
bool bLonChecked = bParseField ? tLon.IsNum() : tLon.IsStr();
if ( !bLatChecked || !bLonChecked )
{
if ( !bLatChecked && !bLonChecked )
sError.SetSprintf ( R"("lat" and "lon" property values should be %s)", ( bParseField ? "numeric" : "string" ) );
else
sError.SetSprintf ( R"("%s" property value should be %s)", ( !bLatChecked ? "lat" : "lon" ), ( bParseField ? "numeric" : "string" ) );
return false;
}
if ( bParseField )
{
pField->m_fLat = tLat.FltVal();
pField->m_fLon = tLon.FltVal();
} else
{
pSource->m_sLat = tLat.StrVal();
pSource->m_sLon = tLon.StrVal();
}
return true;
}
static bool ParseLocation ( const char * sName, const JsonObj_c & tLoc, LocationField_t * pField, LocationSource_t * pSource, CSphString & sError )
{
bool bParseField = !!pField;
assert ( ( bParseField && pField ) || pSource );
bool bObj = tLoc.IsObj();
bool bString = tLoc.IsStr();
bool bArray = tLoc.IsArray();
if ( !bObj && !bString && !bArray )
{
sError.SetSprintf ( "\"%s\" property value should be either an object or a string or an array", sName );
return false;
}
if ( bObj )
return ParseLatLon ( tLoc.GetItem("lat"), tLoc.GetItem("lon"), pField, pSource, sError );
if ( bString )
{
StrVec_t dVals;
sphSplit ( dVals, tLoc.SzVal() );
if ( dVals.GetLength()!=2 )
{
			sError.SetSprintf ( "\"%s\" property value should be a string with lat,lon items, got %d items", sName, dVals.GetLength() );
return false;
}
// string and array order differs
// string - lat, lon
// array - lon, lat
int iLatLen = dVals[0].Length();
int iLonLen = dVals[1].Length();
if ( !iLatLen || !iLonLen )
{
if ( !iLatLen && !iLonLen )
sError.SetSprintf ( R"("lat" and "lon" values should be %s)", ( bParseField ? "numeric" : "string" ) );
else
sError.SetSprintf ( "\"%s\" value should be %s", ( !iLatLen ? "lat" : "lon" ), ( bParseField ? "numeric" : "string" ) );
return false;
}
if ( bParseField )
{
pField->m_fLat = (float)atof ( dVals[0].cstr() );
pField->m_fLon = (float)atof ( dVals[1].cstr() );
} else
{
pSource->m_sLat = dVals[0];
pSource->m_sLon = dVals[1];
}
return true;
}
assert ( bArray );
int iCount = tLoc.Size();
if ( iCount!=2 )
{
sError.SetSprintf ( "\"%s\" property values should be an array with lat,lon items, got %d items", sName, iCount );
return false;
}
// string and array order differs
// array - lon, lat
// string - lat, lon
return ParseLatLon ( tLoc[1], tLoc[0], pField, pSource, sError );
}
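// Accepted "location" shapes (illustrative values; note the lat/lon order difference):
//   object : { "lat": 52.396, "lon": 4.88 }
//   string : "52.396 4.88"      (lat first)
//   array  : [ 4.88, 52.396 ]   (lon first)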
//////////////////////////////////////////////////////////////////////////
bool GeoDistInfo_c::Parse ( const JsonObj_c & tRoot, bool bNeedDistance, CSphString & sError, CSphString & sWarning )
{
JsonObj_c tLocAnchor = tRoot.GetItem("location_anchor");
JsonObj_c tLocSource = tRoot.GetItem("location_source");
if ( !tLocAnchor || !tLocSource )
{
if ( !tLocAnchor && !tLocSource )
sError = R"("location_anchor" and "location_source" properties missing)";
else
sError.SetSprintf ( "\"%s\" property missing", ( !tLocAnchor ? "location_anchor" : "location_source" ) );
return false;
}
if ( !ParseLocation ( "location_anchor", tLocAnchor, &m_tLocAnchor, nullptr, sError )
|| !ParseLocation ( "location_source", tLocSource, nullptr, &m_tLocSource, sError ) )
return false;
JsonObj_c tType = tRoot.GetStrItem ( "distance_type", sError, true );
if ( tType )
{
CSphString sType = tType.StrVal();
if ( sType!="adaptive" && sType!="haversine" )
{
sWarning.SetSprintf ( R"("distance_type" property type is invalid: "%s", defaulting to "adaptive")", sType.cstr() );
m_bGeodistAdaptive = true;
} else
m_bGeodistAdaptive = sType=="adaptive";
} else if ( !sError.IsEmpty() )
return false;
JsonObj_c tDistance = tRoot.GetItem("distance");
if ( tDistance )
{
if ( !ParseDistance ( tDistance, sError ) )
return false;
} else if ( bNeedDistance )
{
sError = "\"distance\" not specified";
return false;
}
m_bGeodist = true;
return true;
}
bool GeoDistInfo_c::ParseDistance ( const JsonObj_c & tDistance, CSphString & sError )
{
if ( tDistance.IsNum() )
{
// no units specified, meters assumed
m_fDistance = tDistance.FltVal();
return true;
}
if ( !tDistance.IsStr() )
{
sError = "\"distance\" property should be a number or a string";
return false;
}
const char * p = tDistance.SzVal();
assert ( p );
while ( *p && sphIsSpace(*p) )
p++;
const char * szNumber = p;
while ( *p && ( *p=='.' || ( *p>='0' && *p<='9' ) ) )
p++;
CSphString sNumber;
sNumber.SetBinary ( szNumber, int ( p-szNumber ) );
while ( *p && sphIsSpace(*p) )
p++;
const char * szUnit = p;
while ( *p && sphIsAlpha(*p) )
p++;
CSphString sUnit;
sUnit.SetBinary ( szUnit, int ( p-szUnit ) );
m_fDistance = (float)atof ( sNumber.cstr() );
float fCoeff = 1.0f;
if ( !GeoDistanceUnit ( sUnit.cstr(), fCoeff ) )
{
sError.SetSprintf ( "unknown distance unit: %s", sUnit.cstr() );
return false;
}
m_fDistance *= fCoeff;
return true;
}
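// Parsing sketch (illustrative, assuming GeoDistanceUnit() maps "km" to a coeff of 1000):
//   "0.5 km" -> m_fDistance = 500.0f
//   5000     -> m_fDistance = 5000.0f (bare numbers are taken as meters)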
CSphString GeoDistInfo_c::BuildExprString() const
{
CSphString sResult;
sResult.SetSprintf ( "GEODIST(%f, %f, %s, %s, {in=deg, out=m, method=%s})", m_tLocAnchor.m_fLat, m_tLocAnchor.m_fLon, m_tLocSource.m_sLat.cstr(), m_tLocSource.m_sLon.cstr(), m_bGeodistAdaptive ? "adaptive" : "haversine" );
return sResult;
}
//////////////////////////////////////////////////////////////////////////
const char * GetFilterAttrPrefix()
{
return g_szFilter;
}
bool IsFilter ( const JsonObj_c & tJson )
{
if ( !tJson )
return false;
CSphString sName = tJson.Name();
if ( sName=="equals" )
return true;
if ( sName=="range" )
return true;
if ( sName=="in" )
return true;
if ( sName=="geo_distance" )
return true;
if ( sName=="exists" )
return true;
return false;
}
static void AddToSelectList ( CSphQuery & tQuery, const CSphVector<CSphQueryItem> & dItems, int iFirstItem = 0 )
{
for ( int i = iFirstItem; i < dItems.GetLength(); i++ )
tQuery.m_sSelect.SetSprintf ( "%s, %s as %s", tQuery.m_sSelect.cstr(), dItems[i].m_sExpr.cstr(), dItems[i].m_sAlias.cstr() );
}
static ESphAttr Json2AttrType ( const JsonObj_c & tJson )
{
if ( tJson.IsInt() ) return SPH_ATTR_BIGINT;
if ( tJson.IsDbl() ) return SPH_ATTR_FLOAT;
if ( tJson.IsBool() ) return SPH_ATTR_BOOL;
if ( tJson.IsStr() ) return SPH_ATTR_STRING;
return SPH_ATTR_NONE;
}
struct FilterTreeNode_t
{
std::unique_ptr<FilterTreeNode_t> m_pLeft;
std::unique_ptr<FilterTreeNode_t> m_pRight;
std::unique_ptr<CSphFilterSettings> m_pFilter;
bool m_bOr = false;
};
class FilterTreeConstructor_c
{
public:
FilterTreeConstructor_c ( CSphQuery & tQuery, CSphString & sError, CSphString & sWarning );
bool Parse ( const JsonObj_c & tObj );
private:
CSphQuery & m_tQuery;
CSphString & m_sError;
CSphString & m_sWarning;
int m_iQueryItemId = 0;
std::pair<bool, std::unique_ptr<FilterTreeNode_t>> ConstructBoolFilters ( const JsonObj_c & tBool );
std::pair<bool, std::unique_ptr<FilterTreeNode_t>> ConstructPlainFilters ( const JsonObj_c & tObj );
std::pair<bool, std::unique_ptr<FilterTreeNode_t>> ConstructBoolNodeFilters ( const JsonObj_c & tClause, CSphVector<CSphQueryItem> & dQueryItems, bool bOr );
std::unique_ptr<FilterTreeNode_t> ConstructInFilter ( const JsonObj_c & tJson );
std::unique_ptr<FilterTreeNode_t> ConstructGeoFilter ( const JsonObj_c & tJson, CSphVector<CSphQueryItem> & dQueryItems );
std::unique_ptr<FilterTreeNode_t> ConstructFilter ( const JsonObj_c & tJson, CSphVector<CSphQueryItem> & dQueryItems );
std::unique_ptr<FilterTreeNode_t> ConstructEqualsFilter ( const JsonObj_c & tJson );
std::unique_ptr<FilterTreeNode_t> ConstructRangeFilter ( const JsonObj_c & tJson );
std::unique_ptr<FilterTreeNode_t> ConstructExistsFilter ( const JsonObj_c & tJson );
JsonObj_c GetFilterColumn ( const JsonObj_c & tJson );
};
template <typename T>
static void WalkTree ( std::unique_ptr<FilterTreeNode_t> & pRoot, T && fnAction )
{
if ( !pRoot )
return;
fnAction(pRoot);
WalkTree ( pRoot->m_pLeft, fnAction );
WalkTree ( pRoot->m_pRight, fnAction );
}
static int CreateFilterTree ( std::unique_ptr<FilterTreeNode_t> & pRoot, CSphVector<CSphFilterSettings> & dFilters, CSphVector<FilterTreeItem_t> & dFilterTree )
{
if ( !pRoot )
return -1;
int iLeft = CreateFilterTree ( pRoot->m_pLeft, dFilters, dFilterTree );
int iRight = CreateFilterTree ( pRoot->m_pRight, dFilters, dFilterTree );
FilterTreeItem_t & tNew = dFilterTree.Add();
tNew.m_bOr = pRoot->m_bOr;
if ( pRoot->m_pFilter )
{
tNew.m_iFilterItem = dFilters.GetLength();
dFilters.Add ( std::move ( *pRoot->m_pFilter ) );
}
else
{
tNew.m_iLeft = iLeft;
tNew.m_iRight = iRight;
}
return dFilterTree.GetLength()-1;
}
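// A minimal worked example of the post-order flattening above (tree
// hypothetical): for a root AND node over two leaf filters A and B, the
// recursion emits the leaves first, so dFilters = { A, B } and
//   dFilterTree[0] = { m_iFilterItem=0 }                    // leaf A
//   dFilterTree[1] = { m_iFilterItem=1 }                    // leaf B
//   dFilterTree[2] = { m_iLeft=0, m_iRight=1, m_bOr=false } // root
// The function returns the index of the node it just appended, so the
// caller's root always sits at the end of dFilterTree.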
static void ConcatTrees ( std::unique_ptr<FilterTreeNode_t> & pLeft, std::unique_ptr<FilterTreeNode_t> & pRight )
{
auto pRoot = std::make_unique<FilterTreeNode_t>();
pRoot->m_pLeft = std::move(pLeft);
pRoot->m_pRight = std::move(pRight);
pLeft = std::move(pRoot);
}
FilterTreeConstructor_c::FilterTreeConstructor_c ( CSphQuery & tQuery, CSphString & sError, CSphString & sWarning )
: m_tQuery ( tQuery )
, m_sError ( sError )
, m_sWarning ( sWarning )
{}
bool FilterTreeConstructor_c::Parse ( const JsonObj_c & tObj )
{
bool bOk;
std::unique_ptr<FilterTreeNode_t> pRoot;
JsonObj_c tBool = tObj.GetItem("bool");
if ( tBool )
std::tie ( bOk, pRoot ) = ConstructBoolFilters(tBool);
else
std::tie ( bOk, pRoot ) = ConstructPlainFilters(tObj);
if ( !bOk )
return false;
if ( !pRoot )
return true;
bool bAllAnd = true;
WalkTree ( pRoot, [&bAllAnd]( auto & pNode ){ bAllAnd &= !pNode->m_bOr; } );
if ( bAllAnd )
{
// pure AND chain: no filter tree is needed, just move the leaf filters into the query
WalkTree ( pRoot, [this]( auto & pNode ){ if ( pNode->m_pFilter ) m_tQuery.m_dFilters.Add ( std::move ( *pNode->m_pFilter ) ); } );
}
else
CreateFilterTree ( pRoot, m_tQuery.m_dFilters, m_tQuery.m_dFilterTree );
return true;
}
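// Behavior sketch (JSON hypothetical): an all-AND input such as
//   {"bool":{"must":[{"equals":{"a":1}},{"equals":{"b":2}}]}}
// keeps m_tQuery.m_dFilterTree empty; both leaf filters are moved straight
// into m_tQuery.m_dFilters by the second WalkTree() call above. Adding a
// "should" clause flips bAllAnd to false and routes everything through
// CreateFilterTree(), which materializes an evaluable OR/AND tree instead.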
std::pair<bool, std::unique_ptr<FilterTreeNode_t>> FilterTreeConstructor_c::ConstructPlainFilters ( const JsonObj_c & tObj )
{
if ( !CheckRootNode ( tObj, m_sError ) )
return { false, nullptr };
for ( const auto & tChild : tObj )
if ( IsFilter(tChild) )
{
int iFirstNewItem = m_tQuery.m_dItems.GetLength();
auto pFilter = ConstructFilter ( tChild, m_tQuery.m_dItems );
if ( !pFilter )
return { false, nullptr };
AddToSelectList ( m_tQuery, m_tQuery.m_dItems, iFirstNewItem );
// handle only the first filter in this case
return { true, std::move(pFilter) };
}
return { true, nullptr };
}
std::pair<bool, std::unique_ptr<FilterTreeNode_t>> FilterTreeConstructor_c::ConstructBoolFilters ( const JsonObj_c & tBool )
{
if ( !tBool.IsObj() )
{
m_sError = "\"bool\" value should be an object";
return { false, nullptr };
}
bool bOk = false;
std::unique_ptr<FilterTreeNode_t> pMustTreeRoot, pShouldTreeRoot, pMustNotTreeRoot, pFilterTreeRoot;
CSphVector<CSphQueryItem> dMustQI, dShouldQI, dMustNotQI;
for ( const auto & tClause : tBool )
{
CSphString sName = tClause.Name();
if ( sName=="must" )
{
std::tie ( bOk, pMustTreeRoot ) = ConstructBoolNodeFilters ( tClause, dMustQI, false );
if ( !bOk )
return { false, nullptr };
}
else if ( sName=="should" )
{
std::tie ( bOk, pShouldTreeRoot ) = ConstructBoolNodeFilters ( tClause, dShouldQI, true );
if ( !bOk )
return { false, nullptr };
}
else if ( sName=="must_not" )
{
std::tie ( bOk, pMustNotTreeRoot ) = ConstructBoolNodeFilters ( tClause, dMustNotQI, false );
if ( !bOk )
return { false, nullptr };
} else if ( sName=="filter" )
{
std::tie ( bOk, pFilterTreeRoot ) = ConstructBoolNodeFilters ( tClause, dMustQI, false );
if ( !bOk )
return { false, nullptr };
} else if ( sName=="minimum_should_match" ) // FIXME!!! add to should as option
{
continue;
} else
{
m_sError.SetSprintf ( "unknown bool query type: \"%s\"", sName.cstr() );
return { false, nullptr };
}
}
if ( pFilterTreeRoot )
{
if ( pMustTreeRoot )
ConcatTrees ( pMustTreeRoot, pFilterTreeRoot );
else
pMustTreeRoot = std::move ( pFilterTreeRoot );
}
if ( pMustNotTreeRoot )
{
// fixme! this may not work as expected; better add a NOT node
WalkTree ( pMustNotTreeRoot, []( auto & pNode ) { if ( pNode->m_pFilter ) pNode->m_pFilter->m_bExclude = !pNode->m_pFilter->m_bExclude; } );
for ( auto & i : dMustNotQI )
dMustQI.Add(i);
if ( pMustTreeRoot )
ConcatTrees ( pMustTreeRoot, pMustNotTreeRoot );
else
pMustTreeRoot = std::move ( pMustNotTreeRoot );
}
if ( pMustTreeRoot )
{
AddToSelectList ( m_tQuery, dMustQI );
for ( const auto & i : dMustQI )
m_tQuery.m_dItems.Add(i);
return { true, std::move(pMustTreeRoot) };
}
if ( pShouldTreeRoot )
{
AddToSelectList ( m_tQuery, dShouldQI );
for ( const auto & i : dShouldQI )
m_tQuery.m_dItems.Add(i);
return { true, std::move(pShouldTreeRoot) };
}
return { true, nullptr };
}
static std::unique_ptr<FilterTreeNode_t> ConcatFilterTreeItems ( std::vector<std::unique_ptr<FilterTreeNode_t>> & dAdded, DWORD uStart, bool bOr )
{
if ( dAdded.empty() )
return nullptr;
if ( uStart==dAdded.size()-1 )
return std::move ( dAdded[uStart] );
auto pRoot = std::make_unique<FilterTreeNode_t>();
pRoot->m_pLeft = std::move ( dAdded[uStart] );
pRoot->m_pRight = ConcatFilterTreeItems ( dAdded, uStart+1, bOr );
pRoot->m_bOr = bOr;
return pRoot;
}
std::pair<bool, std::unique_ptr<FilterTreeNode_t>> FilterTreeConstructor_c::ConstructBoolNodeFilters ( const JsonObj_c & tClause, CSphVector<CSphQueryItem> & dQueryItems, bool bOr )
{
if ( tClause.IsArray() )
{
std::vector<std::unique_ptr<FilterTreeNode_t>> dAdded;
for ( const auto & tObject : tClause )
{
if ( !tObject.IsObj() )
{
m_sError.SetSprintf ( "\"%s\" array value should be an object", tClause.Name() );
return {false, nullptr};
}
JsonObj_c tItem = tObject[0];
if ( !tItem )
continue;
CSphString sName = tItem.Name();
if ( sName=="bool" )
{
auto tRes = ConstructBoolFilters(tItem);
if ( !tRes.first )
return {false, nullptr};
if ( tRes.second )
dAdded.push_back ( std::move(tRes.second) );
} else if ( IsFilter(tItem) )
{
auto pFilter = ConstructFilter ( tItem, dQueryItems );
if ( !pFilter )
return {false, nullptr};
// might be a list from range string with both lt(e) and gt(e) nodes
if ( pFilter->m_pRight )
dAdded.push_back ( std::move(pFilter->m_pRight) );
dAdded.push_back ( std::move(pFilter) );
}
}
if ( dAdded.empty() )
return {true, nullptr};
return { true, ConcatFilterTreeItems ( dAdded, 0, bOr ) };
}
else if ( tClause.IsObj() )
{
JsonObj_c tItem = tClause[0];
if ( IsFilter(tItem) )
{
auto pFilter = ConstructFilter ( tItem, dQueryItems );
if ( !pFilter )
return {false, nullptr};
return { true, std::move(pFilter) };
}
return {true, nullptr};
}
m_sError.SetSprintf ( "\"%s\" value should be an object or an array", tClause.Name() );
return {false, nullptr};
}
JsonObj_c FilterTreeConstructor_c::GetFilterColumn ( const JsonObj_c & tJson )
{
if ( !tJson.IsObj() )
{
m_sError = "filter should be an object";
return JsonNull;
}
if ( tJson.Size()!=1 )
{
m_sError = "filter should have only one element";
return JsonNull;
}
JsonObj_c tColumn = tJson[0];
if ( !tColumn )
{
m_sError = "empty filter found";
return JsonNull;
}
return tColumn;
}
static void SetMvaFilterFunc ( CSphFilterSettings & tFilter );
std::unique_ptr<FilterTreeNode_t> FilterTreeConstructor_c::ConstructInFilter ( const JsonObj_c & tJson )
{
JsonObj_c tColumn = GetFilterColumn(tJson);
if ( !tColumn )
return nullptr;
if ( !tColumn.IsArray() )
{
m_sError = "\"in\" filter should contain an array of values";
return nullptr;
}
auto pFilterNode = std::make_unique<FilterTreeNode_t>();
pFilterNode->m_pFilter = std::make_unique<CSphFilterSettings>();
auto & tFilter = *pFilterNode->m_pFilter;
tFilter.m_sAttrName = tColumn.Name();
sphColumnToLowercase ( const_cast<char *>( tFilter.m_sAttrName.cstr() ) );
SetMvaFilterFunc ( tFilter );
if ( tColumn.Size() )
{
ESphAttr eValueType = Json2AttrType ( tColumn[0] );
switch ( eValueType )
{
case SPH_ATTR_STRING:
tFilter.m_eType = SPH_FILTER_STRING_LIST;
break;
case SPH_ATTR_FLOAT:
case SPH_ATTR_NONE:
m_sError = "\"in\" supports only integer, bool and string values";
return nullptr;
default:
tFilter.m_eType = SPH_FILTER_VALUES;
break;
}
for ( const auto & i : tColumn )
{
ESphAttr eNewValueType = Json2AttrType(i);
if ( eNewValueType!=eValueType )
{
m_sError = "all values in the \"in\" filter should have one type";
return nullptr;
}
if ( eValueType==SPH_ATTR_STRING )
tFilter.m_dStrings.Add ( i.StrVal() );
else
tFilter.m_dValues.Add ( i.IntVal() );
}
}
tFilter.m_dStrings.Uniq();
tFilter.m_dValues.Uniq();
return pFilterNode;
}
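// Example mapping (JSON hypothetical): {"in":{"price":[3,1,2,2]}} yields
// m_sAttrName="price", m_eType=SPH_FILTER_VALUES with m_dValues deduplicated
// to {1,2,3}. A string array produces SPH_FILTER_STRING_LIST instead, while
// float or mixed-type arrays are rejected with an error above.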
std::unique_ptr<FilterTreeNode_t> FilterTreeConstructor_c::ConstructGeoFilter ( const JsonObj_c & tJson, CSphVector<CSphQueryItem> & dQueryItems )
{
GeoDistInfo_c tGeoDist;
if ( !tGeoDist.Parse ( tJson, true, m_sError, m_sWarning ) )
return nullptr;
CSphQueryItem & tQueryItem = dQueryItems.Add();
tQueryItem.m_sExpr = tGeoDist.BuildExprString();
tQueryItem.m_sAlias.SetSprintf ( "%s%d", g_szFilter, m_iQueryItemId++ );
auto pFilterNode = std::make_unique<FilterTreeNode_t>();
pFilterNode->m_pFilter = std::make_unique<CSphFilterSettings>();
auto & tFilter = *pFilterNode->m_pFilter;
tFilter.m_sAttrName = tQueryItem.m_sAlias;
tFilter.m_bOpenLeft = true;
tFilter.m_bHasEqualMax = true;
tFilter.m_fMaxValue = tGeoDist.GetDistance();
tFilter.m_eType = SPH_FILTER_FLOATRANGE;
return pFilterNode;
}
std::unique_ptr<FilterTreeNode_t> FilterTreeConstructor_c::ConstructExistsFilter ( const JsonObj_c & tJson )
{
JsonObj_c tColumn = GetFilterColumn(tJson);
if ( !tColumn )
return nullptr;
auto pFilterNode = std::make_unique<FilterTreeNode_t>();
pFilterNode->m_pFilter = std::make_unique<CSphFilterSettings>();
auto & tFilter = *pFilterNode->m_pFilter;
const char sFieldName[] = "field";
bool bFieldType = ( strncmp ( tColumn.Name(), sFieldName, sizeof(sFieldName) )==0 );
// that is non standard extension from compart mode filter fixup
const char sAttrName[] = "attr";
bool bAttrType = false;
if ( !bFieldType )
bAttrType = ( strncmp ( tColumn.Name(), sAttrName, sizeof(sAttrName) )==0 );
if ( !bFieldType && !bAttrType )
{
m_sError = "exists filter should have only one field element";
return nullptr;
}
if ( !tColumn.IsStr() )
{
m_sError = "exists filter expects string value";
return nullptr;
}
if ( bFieldType )
{
tFilter.m_sAttrName.SetSprintf ( "%s_len", tColumn.SzVal() );
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_dValues.Add ( 0 );
tFilter.m_bExclude = true;
} else
{
tFilter.m_sAttrName.SetSprintf ( "length(%s)!=0", tColumn.SzVal() );
tFilter.m_eType = SPH_FILTER_EXPRESSION;
}
return pFilterNode;
}
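// Example mappings (names hypothetical): {"exists":{"field":"title"}} filters
// on the implicit "title_len" attribute, excluding the value 0, while the
// non-standard {"exists":{"attr":"j.price"}} variant compiles to the
// expression filter length(j.price)!=0.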
std::unique_ptr<FilterTreeNode_t> FilterTreeConstructor_c::ConstructFilter ( const JsonObj_c & tJson, CSphVector<CSphQueryItem> & dQueryItems )
{
CSphString sName = tJson.Name();
if ( sName=="equals" )
return ConstructEqualsFilter(tJson);
if ( sName=="range" )
return ConstructRangeFilter(tJson);
if ( sName=="in" )
return ConstructInFilter(tJson);
if ( sName=="geo_distance" )
return ConstructGeoFilter ( tJson, dQueryItems );
if ( sName=="exists" )
return ConstructExistsFilter ( tJson );
m_sError.SetSprintf ( "unknown filter type: %s", sName.cstr() );
return nullptr;
}
void SetMvaFilterFunc ( CSphFilterSettings & tFilter )
{
bool bAll = tFilter.m_sAttrName.Begins ( "all(" );
bool bAny = tFilter.m_sAttrName.Begins ( "any(" );
if ( !bAll && !bAny )
return;
int iLen = tFilter.m_sAttrName.Length();
if ( iLen<5 )
return;
CSphString sName;
sName.SetBinary ( tFilter.m_sAttrName.cstr()+4, iLen-5 );
tFilter.m_sAttrName.Swap ( sName );
tFilter.m_eMvaFunc = ( bAll ? SPH_MVAFUNC_ALL : SPH_MVAFUNC_ANY );
}
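// A quick sketch of the in-place rewrite above (attribute name hypothetical):
#if 0
CSphFilterSettings tF;
tF.m_sAttrName = "any(tags)";
SetMvaFilterFunc ( tF ); // tF.m_sAttrName is now "tags", m_eMvaFunc==SPH_MVAFUNC_ANY
#endif
// "all(tags)" maps to SPH_MVAFUNC_ALL the same way; plain names pass through untouched.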
std::unique_ptr<FilterTreeNode_t> FilterTreeConstructor_c::ConstructEqualsFilter ( const JsonObj_c & tJson )
{
JsonObj_c tColumn = GetFilterColumn(tJson);
if ( !tColumn )
return nullptr;
if ( !tColumn.IsNum() && !tColumn.IsStr() && !tColumn.IsArray() )
{
m_sError = "\"equals\" filter expects numeric or string values";
return nullptr;
}
auto pFilterNode = std::make_unique<FilterTreeNode_t>();
pFilterNode->m_pFilter = std::make_unique<CSphFilterSettings>();
auto & tFilter = *pFilterNode->m_pFilter;
tFilter.m_sAttrName = tColumn.Name();
sphColumnToLowercase ( const_cast<char *>( tFilter.m_sAttrName.cstr() ) );
SetMvaFilterFunc ( tFilter );
if ( tColumn.IsInt() )
{
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_dValues.Add ( tColumn.IntVal() );
} else if ( tColumn.IsNum() )
{
tFilter.m_eType = SPH_FILTER_FLOATRANGE;
tFilter.m_fMinValue = tColumn.FltVal();
tFilter.m_fMaxValue = tColumn.FltVal();
tFilter.m_bHasEqualMin = true;
tFilter.m_bHasEqualMax = true;
tFilter.m_bExclude = false;
} else if ( tColumn.IsArray() )
{
int iSize = tColumn.Size();
if ( iSize )
{
if ( tColumn[0].IsStr() )
{
tFilter.m_eType = SPH_FILTER_STRING_LIST;
tFilter.m_dStrings.Resize ( iSize );
} else
{
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_dValues.Resize ( iSize );
}
}
for ( int i=0; i<iSize; i++ )
{
const JsonObj_c tVal = tColumn[i];
if ( tFilter.m_eType==SPH_FILTER_STRING_LIST )
{
tFilter.m_dStrings[i] = tVal.StrVal();
} else
{
tFilter.m_dValues[i] = tVal.IntVal();
}
}
} else
{
tFilter.m_eType = SPH_FILTER_STRING;
tFilter.m_dStrings.Add ( tColumn.StrVal() );
tFilter.m_bExclude = false;
}
return pFilterNode;
}
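// Example mappings (JSON hypothetical): {"equals":{"gid":3}} becomes a
// SPH_FILTER_VALUES filter holding the single value 3, while the float form
// {"equals":{"price":1.5}} is expressed as a degenerate SPH_FILTER_FLOATRANGE
// with min=max=1.5 and both bounds inclusive, since there is no exact-value
// float filter here.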
static void SetRangeStrLess ( const JsonObj_c & tLess, CSphFilterSettings & tFilter )
{
tFilter.m_dStrings.Add ( tLess.StrVal() );
if ( tFilter.m_bHasEqualMax )
{
// "lte X" is expressed as NOT ( value > X )
tFilter.m_eStrCmpDir = EStrCmpDir::GT;
tFilter.m_bExclude = true;
} else
{
tFilter.m_eStrCmpDir = EStrCmpDir::LT;
}
}
static void SetRangeStrGreater ( const JsonObj_c & tGreater, CSphFilterSettings & tFilter )
{
tFilter.m_dStrings.Add ( tGreater.StrVal() );
if ( tFilter.m_bHasEqualMin )
{
// "gte X" is expressed as NOT ( value < X )
tFilter.m_eStrCmpDir = EStrCmpDir::LT;
tFilter.m_bExclude = true;
} else
{
tFilter.m_eStrCmpDir = EStrCmpDir::GT;
}
}
std::unique_ptr<FilterTreeNode_t> FilterTreeConstructor_c::ConstructRangeFilter ( const JsonObj_c & tJson )
{
JsonObj_c tColumn = GetFilterColumn(tJson);
if ( !tColumn )
return nullptr;
auto pFilterNode = std::make_unique<FilterTreeNode_t>();
pFilterNode->m_pFilter = std::make_unique<CSphFilterSettings>();
auto & tFilter = *pFilterNode->m_pFilter;
tFilter.m_sAttrName = tColumn.Name();
sphColumnToLowercase ( const_cast<char *>( tFilter.m_sAttrName.cstr() ) );
tFilter.m_bHasEqualMin = false;
tFilter.m_bHasEqualMax = false;
JsonObj_c tLess = tColumn.GetItem("lt");
if ( !tLess )
{
tLess = tColumn.GetItem("lte");
tFilter.m_bHasEqualMax = tLess;
}
JsonObj_c tGreater = tColumn.GetItem("gt");
if ( !tGreater )
{
tGreater = tColumn.GetItem("gte");
tFilter.m_bHasEqualMin = tGreater;
}
bool bLess = tLess;
bool bGreater = tGreater;
if ( !bLess && !bGreater )
{
m_sError = "empty filter found";
return nullptr;
}
int64_t iLessVal = -1;
int64_t iGreaterVal = -1;
if ( ( bLess && !tLess.IsNum() ) || ( bGreater && !tGreater.IsNum() ) )
{
JsonObj_c tDateFormat = tColumn.GetStrItem ( "format", m_sError, true );
if ( tDateFormat && tDateFormat.StrVal()=="strict_date_optional_time" )
{
if ( bLess )
iLessVal = GetUTC ( tLess.StrVal() );
if ( bGreater )
iGreaterVal = GetUTC ( tGreater.StrVal() );
}
if ( ( bLess && iLessVal==-1 && tLess.IsStr() && tLess.SzVal() && strcmp ( tLess.SzVal(), "now" )==0 )
|| ( bGreater && iGreaterVal==-1 && tGreater.IsStr() && tGreater.SzVal() && strcmp ( tGreater.SzVal(), "now" )==0 ) )
{
if ( bLess && iLessVal==-1 )
iLessVal = (int) time ( nullptr );
if ( bGreater && iGreaterVal==-1 )
iGreaterVal = (int) time ( nullptr );
}
// fall back to a full string comparison in the range
if ( ( bLess && iLessVal==-1 && tLess.IsStr() ) || ( bGreater && iGreaterVal==-1 && tGreater.IsStr() ) )
{
tFilter.m_eType = SPH_FILTER_STRING;
if ( bLess && bGreater )
{
auto pFilterNodeGt = std::make_unique<FilterTreeNode_t>();
pFilterNodeGt->m_pFilter = std::make_unique<CSphFilterSettings>( tFilter );
SetRangeStrLess ( tLess, tFilter );
SetRangeStrGreater ( tGreater, *pFilterNodeGt->m_pFilter );
pFilterNode->m_pRight = std::move ( pFilterNodeGt );
}
else if ( bLess )
{
SetRangeStrLess ( tLess, tFilter );
} else
{
SetRangeStrGreater ( tGreater, tFilter );
}
return pFilterNode;
}
if ( ( bLess && iLessVal==-1 ) || ( bGreater && iGreaterVal==-1) )
{
m_sError = "range filter expects numeric values";
return nullptr;
}
} else
{
if ( bLess && tLess.IsInt() )
iLessVal = tLess.IntVal();
if ( bGreater && tGreater.IsInt() )
iGreaterVal = tGreater.IntVal();
}
// by this point any string bound has been parsed into an int64 timestamp, so string bounds count as integers here
bool bIntFilter = ( ( !bLess || tLess.IsInt() || tLess.IsStr() ) && ( !bGreater || tGreater.IsInt() || tGreater.IsStr() ) );
if ( bLess )
{
if ( bIntFilter )
tFilter.m_iMaxValue = iLessVal;
else
tFilter.m_fMaxValue = tLess.FltVal();
tFilter.m_bOpenLeft = !bGreater;
}
if ( bGreater )
{
if ( bIntFilter )
tFilter.m_iMinValue = iGreaterVal;
else
tFilter.m_fMinValue = tGreater.FltVal();
tFilter.m_bOpenRight = !bLess;
}
tFilter.m_eType = bIntFilter ? SPH_FILTER_RANGE : SPH_FILTER_FLOATRANGE;
// float filters don't support open ranges
if ( !bIntFilter )
{
if ( tFilter.m_bOpenRight )
{
tFilter.m_fMaxValue = FLT_MAX;
tFilter.m_bHasEqualMax = true;
}
if ( tFilter.m_bOpenLeft )
{
tFilter.m_fMinValue = -FLT_MAX;
tFilter.m_bHasEqualMin = true;
}
}
return pFilterNode;
}
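// Example mappings (JSON hypothetical): {"range":{"price":{"gte":100,"lt":200}}}
// becomes SPH_FILTER_RANGE with m_iMinValue=100 inclusive (m_bHasEqualMin set)
// and m_iMaxValue=200 exclusive. A string bound such as
// {"range":{"name":{"gt":"a","lt":"z"}}} takes the SPH_FILTER_STRING path and
// yields two chained nodes, one per bound, linked through m_pRight;
// ConstructBoolNodeFilters() later splits that chain into two tree leaves.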
static bool ConstructFilters ( const JsonObj_c & tJson, CSphQuery & tQuery, CSphString & sError, CSphString & sWarning )
{
if ( !tJson )
return false;
CSphString sName = tJson.Name();
if ( sName.IsEmpty() )
return false;
bool bKNN = sName=="knn";
if ( sName!="query" && !bKNN )
{
sError.SetSprintf ( R"("query" or "knn" expected, got %s)", sName.cstr() );
return false;
}
JsonObj_c tKNNFilter;
if ( bKNN )
{
tKNNFilter = tJson.GetObjItem ( "filter", sError, true );
if ( !tKNNFilter )
return true;
}
FilterTreeConstructor_c tTreeConstructor ( tQuery, sError, sWarning );
return tTreeConstructor.Parse ( bKNN ? tKNNFilter : tJson );
}
bool ParseJsonQueryFilters ( const JsonObj_c & tJson, CSphQuery & tQuery, CSphString & sError, CSphString & sWarning )
{
if ( tJson && !tJson.IsObj() )
{
sError = "\"query\" property value should be an object";
return false;
}
CSphQueryItem & tItem = tQuery.m_dItems.Add();
tItem.m_sExpr = "*";
tItem.m_sAlias = "*";
tQuery.m_sSelect = "*";
// we need to know if the query is fullscan before re-parsing it to build AST tree
// so we need to do some preprocessing here
bool bFullscan = !tJson || ( tJson.Size()==1 && tJson.HasItem("match_all") );
if ( !bFullscan )
{
if ( CSphString ( tJson.Name() )=="knn" )
{
JsonObj_c tFilter = tJson.GetObjItem ( "filter", sError, true );
if ( tFilter )
tQuery.m_sQuery = tFilter.AsString();
}
else
tQuery.m_sQuery = tJson.AsString();
}
// because of the way sphinxql parsing was implemented
// we need to parse our query and extract filters now
// and parse the rest of the query later
if ( tJson && !ConstructFilters ( tJson, tQuery, sError, sWarning ) )
return false;
return true;
}
| 28,882
|
C++
|
.cpp
| 897
| 29.400223
| 223
| 0.682383
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,844
|
datetime.cpp
|
manticoresoftware_manticoresearch/src/datetime.cpp
|
//
// Copyright (c) 2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "datetime.h"
#include "fileutils.h"
#include "cctz/time_zone.h"
#include <stdlib.h>
static bool g_bInitialized = false;
static bool g_bTimeZoneUTC = false;
static bool g_bTimeZoneSet = false;
static cctz::time_zone g_hTimeZone, g_hTimeZoneLocal, g_hTimeZoneUTC;
static void CheckForUTC()
{
g_bTimeZoneUTC = !strcasecmp ( g_hTimeZone.name().c_str(), "UTC" );
}
#if !_WIN32
static CSphString DetermineLocalTimeZoneName ( CSphString & sWarning )
{
CSphString sPrefix = "Error resolving local time zone from";
CSphString sTimeZoneFile = "/etc/localtime";
const char * szTZ = getenv("TZ");
if ( szTZ )
{
sPrefix.SetSprintf ( "%s TZ='%s'", sPrefix.cstr(), szTZ );
if ( *szTZ==':' )
++szTZ;
sTimeZoneFile = szTZ;
}
else
sPrefix.SetSprintf ( "%s '%s'", sPrefix.cstr(), sTimeZoneFile.cstr() );
CSphString sTimeZoneDir = "/usr/share/zoneinfo/";
const char * szTZDIR = getenv("TZDIR");
if ( szTZDIR )
{
sPrefix.SetSprintf ( "%s and TZDIR='%s'", sPrefix.cstr(), szTZDIR );
sTimeZoneDir = szTZDIR;
}
else
sPrefix.SetSprintf ( "%s and time zone dir '%s'", sPrefix.cstr(), sTimeZoneDir.cstr() );
CSphString sResolved;
if ( IsSymlink(sTimeZoneFile) && !ResolveSymlink ( sTimeZoneFile, sResolved ) )
{
sWarning = sPrefix;
return "UTC";
}
sTimeZoneFile = sResolved;
if ( IsPathAbsolute(sTimeZoneFile) )
{
if ( !sphFileExists(sTimeZoneFile) )
{
sWarning = sPrefix;
return "UTC";
}
if ( sTimeZoneFile.Begins ( sTimeZoneDir.cstr() ) )
return sTimeZoneFile.SubString ( sTimeZoneDir.Length(), sTimeZoneFile.Length()-sTimeZoneDir.Length() );
sWarning = sPrefix;
return "UTC";
}
if ( !sTimeZoneDir.Ends("/") )
sTimeZoneDir.SetSprintf ( "%s/", sTimeZoneDir.cstr() );
CSphString sCheck;
sCheck.SetSprintf ( "%s%s", sTimeZoneDir.cstr(), sTimeZoneFile.cstr() );
if ( sphFileExists(sCheck) )
return sTimeZoneFile;
sWarning = sPrefix;
return "UTC";
}
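// Resolution sketch (paths hypothetical): with TZ=':Europe/Amsterdam' the
// leading ':' is stripped and, since the remainder is a relative name, it is
// validated against $TZDIR/Europe/Amsterdam and returned as-is. An absolute
// TZ value or /etc/localtime symlink target must live under the zoneinfo dir
// for its suffix to be usable; anything unresolvable falls back to "UTC" with
// a warning carrying the collected context prefix.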
static CSphString FixupZoneName ( const CSphString & sName )
{
if ( sName=="Etc/UTC" )
return "UTC";
return sName;
}
#endif
static void SetTimeZoneLocal ( StrVec_t & dWarnings )
{
CSphString sDirName;
sDirName.SetSprintf ( "%s/tzdata", GET_FULL_SHARE_DIR() );
#if _WIN32
_putenv_s ( "TZDIR", sDirName.cstr() );
// use cctz's internal local time zone code
g_hTimeZoneLocal = cctz::local_time_zone();
#else
CSphString sWarning;
CSphString sZone = DetermineLocalTimeZoneName(sWarning);
if ( !sWarning.IsEmpty() )
dWarnings.Add(sWarning);
sZone = FixupZoneName(sZone);
setenv ( "TZDIR", sDirName.cstr(), 1 );
if ( !cctz::load_time_zone ( sZone.cstr(), &g_hTimeZoneLocal ) )
{
sWarning.SetSprintf ( "Unable to load local time zone '%s' from '%s' dir", sZone.cstr(), sDirName.cstr() );
dWarnings.Add(sWarning);
g_hTimeZoneLocal = g_hTimeZoneUTC;
}
#endif
g_hTimeZone = g_hTimeZoneLocal;
CheckForUTC();
}
void InitTimeZones ( StrVec_t & dWarnings )
{
cctz::load_time_zone ( "UTC", &g_hTimeZoneUTC );
SetTimeZoneLocal ( dWarnings );
g_bInitialized = true;
}
bool SetTimeZone ( const char * szTZ, CSphString & sError )
{
if ( !cctz::load_time_zone ( szTZ, &g_hTimeZone ) )
{
sError.SetSprintf ( "Unable to set time zone '%s'", szTZ );
return false;
}
g_bTimeZoneSet = !g_hTimeZone.name().empty() && strcasecmp ( g_hTimeZone.name().c_str(), "UTC" );
CheckForUTC();
return true;
}
bool IsTimeZoneSet()
{
return g_bTimeZoneSet;
}
CSphString GetTimeZoneName()
{
if ( !g_bInitialized )
return "";
return g_hTimeZone.name().c_str();
}
CSphString GetLocalTimeZoneName()
{
if ( !g_bInitialized )
return "";
return g_hTimeZoneLocal.name().c_str();
}
cctz::civil_second ConvertTime ( time_t tTime )
{
return cctz::convert ( std::chrono::system_clock::from_time_t(tTime), g_hTimeZone );
}
cctz::civil_second ConvertTimeLocal ( time_t tTime )
{
return cctz::convert ( std::chrono::system_clock::from_time_t(tTime), g_hTimeZoneLocal );
}
cctz::civil_second ConvertTimeUTC ( time_t tTime )
{
return cctz::convert ( std::chrono::system_clock::from_time_t(tTime), g_hTimeZoneUTC );
}
time_t ConvertTime ( const cctz::civil_second & tCS )
{
return std::chrono::system_clock::to_time_t ( cctz::convert ( tCS, g_hTimeZone ) );
}
time_t PackLocalTimeAsUTC ( time_t tTime )
{
if ( g_bTimeZoneUTC )
return tTime;
// convert time to local timezone
auto tSec = cctz::convert ( std::chrono::system_clock::from_time_t(tTime), g_hTimeZone );
// interpret it as absolute UTC time
return cctz::convert ( tSec, g_hTimeZoneUTC ).time_since_epoch().count();
}
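// Worked example (zone hypothetical): with g_hTimeZone at a fixed UTC+02:00,
// an input epoch t is first rendered as local civil time (two hours ahead on
// the wall clock) and then re-packed as if that civil time were UTC, so the
// result is t + 7200. With g_hTimeZone==UTC the function is the identity,
// which is what the early return above short-circuits.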
int GetWeekDay ( const cctz::civil_second & tTime, bool bSundayFirst )
{
if ( !bSundayFirst )
return (int)cctz::get_weekday(tTime) + 1;
// make sunday #1
int iWeekDay = (int)cctz::get_weekday(tTime) + 2;
if ( iWeekDay==8 )
return 1;
return iWeekDay;
}
int GetYearDay ( const cctz::civil_second & tTime )
{
return (int)cctz::get_yearday(tTime);
}
int GetQuarter ( const cctz::civil_second & tTime )
{
return ( ( tTime.month() - 1 ) / 3 ) + 1;
}
static FORCE_INLINE bool IsLeapYear ( int iYear )
{
return ( iYear & 3 ) == 0 && ( iYear % 100 != 0 || iYear % 400 == 0 );
}
bool IsLeapYear ( const cctz::civil_second & tTime )
{
return IsLeapYear ( tTime.year() );
}
int CalcNumYearDays ( const cctz::civil_second & tTime )
{
return IsLeapYear(tTime) ? 366 : 365;
}
CSphString FormatTime ( time_t tTime, const char * szFmt )
{
std::string sRes = cctz::format ( szFmt, std::chrono::system_clock::from_time_t(tTime), g_hTimeZone );
return sRes.c_str();
}
int CalcYearMonth ( const cctz::civil_second & tTime )
{
return tTime.year()*100 + tTime.month();
}
int CalcYearMonthDay ( const cctz::civil_second & tTime )
{
return tTime.year()*10000 + tTime.month()*100 + tTime.day();
}
int CalcYearWeek ( const cctz::civil_second & tTime )
{
int iPrevSunday = GetYearDay(tTime) - GetWeekDay ( tTime, true ) + 1;
int iYear = tTime.year();
if ( iPrevSunday<=0 ) // check if we crossed year boundary
{
// adjust day and year
iPrevSunday += 365;
iYear--;
if ( IsLeapYear(iYear) )
iPrevSunday++;
}
return iYear*1000 + iPrevSunday;
}
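// Worked example: for 2024-01-03 (a Wednesday; Sunday-first weekday 4,
// yearday 3) iPrevSunday = 3 - 4 + 1 = 0, so the year-boundary branch fires:
// 0 + 365 = 365 with the year dropping to 2023 (not a leap year), giving
// 2023365. That checks out: the Sunday opening this week, 2023-12-31, is
// day 365 of 2023.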
static int CalcDayNumber ( const cctz::civil_day & tTime )
{
cctz::civil_day tFirstDay;
return tTime - tFirstDay;
}
enum WeekFlags_e : uint32_t
{
WEEK_FLAG_MONDAY_FIRST = 1,
WEEK_FLAG_YEAR = 2,
WEEK_FLAG_FIRST_WEEKDAY = 4
};
static uint32_t FixupWeekFlags ( uint32_t uFlags )
{
uFlags &= 7;
if ( !( uFlags & WEEK_FLAG_MONDAY_FIRST ) )
uFlags ^= WEEK_FLAG_FIRST_WEEKDAY;
return uFlags;
}
int CalcWeekNumber ( const cctz::civil_second & tTime, uint32_t uFlags )
{
uFlags = FixupWeekFlags(uFlags);
bool bStartsWithMonday = uFlags & WEEK_FLAG_MONDAY_FIRST;
bool bWeekOfYear = uFlags & WEEK_FLAG_YEAR;
bool bFirstWeekDay = uFlags & WEEK_FLAG_FIRST_WEEKDAY;
cctz::civil_day tFirstDayOfYear ( tTime.year(), 1, 1 );
int iNumDays = CalcDayNumber( cctz::civil_day(tTime) );
int iNumDaysFirst = CalcDayNumber(tFirstDayOfYear);
int iWeekday = GetWeekDay ( tFirstDayOfYear, !bStartsWithMonday ) - 1;
int iYear = tTime.year();
if ( tTime.month()==1 && tTime.day()<=7-iWeekday )
{
if ( !bWeekOfYear && ( ( bFirstWeekDay && iWeekday!=0) || (!bFirstWeekDay && iWeekday >= 4) ) )
return 0;
bWeekOfYear = true;
iYear--;
int iDays = CalcNumYearDays ( cctz::civil_year(iYear) );
iNumDaysFirst -= iDays;
iWeekday = ( iWeekday + 53*7 - iDays ) % 7;
}
int iDays = 0;
if ( (bFirstWeekDay && iWeekday != 0) || (!bFirstWeekDay && iWeekday >= 4) )
iDays = iNumDays - ( iNumDaysFirst + ( 7 - iWeekday ) );
else
iDays = iNumDays - ( iNumDaysFirst - iWeekday );
if ( bWeekOfYear && iDays>=52*7 )
{
iWeekday = ( iWeekday + CalcNumYearDays ( cctz::civil_year(iYear) ) ) % 7;
if ( ( !bFirstWeekDay && iWeekday < 4 ) || ( bFirstWeekDay && !iWeekday ) )
return 1;
}
return iDays/7 + 1;
}
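// Flag semantics sketch (to our understanding this mirrors MySQL's WEEK()
// mode bits): WEEK_FLAG_MONDAY_FIRST starts weeks on Monday, WEEK_FLAG_YEAR
// returns 1..53 relative to the week's own year rather than 0..53 of the
// date's year, and WEEK_FLAG_FIRST_WEEKDAY counts week 1 only from the first
// full week (the one starting on the week's first day); without it the
// ISO-style rule applies and a partial first week with 4+ days is week 1.
#if 0
// e.g. 2024-01-01 is a Monday, so for the Wednesday two days later:
int iWeek = CalcWeekNumber ( cctz::civil_second ( 2024, 1, 3, 0, 0, 0 ),
WEEK_FLAG_MONDAY_FIRST | WEEK_FLAG_YEAR ); // evaluates to 1
#endif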
bool ParseAsLocalTime ( const char * szFmt, const CSphString & sTime, time_t & tRes )
{
std::chrono::time_point<std::chrono::system_clock, std::chrono::seconds> tTP;
if ( !cctz::parse ( szFmt, sTime.cstr(), g_hTimeZone, &tTP ) )
return false;
tRes = tTP.time_since_epoch().count();
return true;
}
| 8,550
|
C++
|
.cpp
| 281
| 28.266904
| 109
| 0.702104
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,845
|
tests.cpp
|
manticoresoftware_manticoresearch/src/tests.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxexpr.h"
#include "sphinxutils.h"
#include "sphinxquery.h"
#include "sphinxrt.h"
#include "sphinxint.h"
#include "threadutils.h"
#include "stripper/html_stripper.h"
#include "tokenizer/charset_definition_parser.h"
#include "searchdaemon.h"
#include "sphinxsort.h"
#include "costestimate.h"
#include "histogram.h"
#include "secondaryindex.h"
#include "secondarylib.h"
#include <cmath>
#define SNOWBALL 0
#define CROSSCHECK 0
#define PORTER1 0
#if SNOWBALL
#include "header.h"
#include "api.c"
#include "utilities.c"
#include "stem.c"
#endif
#if PORTER1
#include "porter1.c"
#endif
//////////////////////////////////////////////////////////////////////////
#if _WIN32
#define NOINLINE __declspec(noinline)
#else
#define NOINLINE
#endif
//////////////////////////////////////////////////////////////////////////
static volatile int g_iMutexBench = 0;
void DummyThread ( void * )
{}
void MutexBenchThread ( void * pArg )
{
CSphMutex * pMutex = (CSphMutex *) pArg;
pMutex->Lock();
for ( int i=0; i<100; i++ )
g_iMutexBench++;
g_iMutexBench -= 99;
pMutex->Unlock();
}
void BenchThreads ()
{
printf ( "benchmarking threads\n" );
const int BATCHES = 100;
const int BATCH_THREADS = 100;
const int TOTAL_THREADS = BATCHES*BATCH_THREADS;
SphThread_t * pThd = new SphThread_t [ BATCH_THREADS ];
CSphMutex tMutex;
for ( int iRun=1; iRun<=2; iRun++ )
{
int64_t tmThd = sphMicroTimer();
for ( int iBatch=0; iBatch<BATCHES; iBatch++ )
{
for ( int i=0; i<BATCH_THREADS; i++ )
if ( !Threads::Create ( pThd+i, [&] { if (iRun!=1) MutexBenchThread ( &tMutex ); } ) )
sphDie ( "failed to create thread (batch %d thd %d)", iBatch, i );
for ( int i=0; i<BATCH_THREADS; i++ )
if ( !Threads::Join ( pThd+i ) )
sphDie ( "failed to join to thread (batch %d thd %d)", iBatch, i );
}
tmThd = sphMicroTimer()-tmThd;
if ( iRun==2 && g_iMutexBench!=TOTAL_THREADS )
sphDie ( "failed mutex benchmark (expected=%d, got=%d)", TOTAL_THREADS, g_iMutexBench );
int iThdSec10 = (int)( int64_t(TOTAL_THREADS)*1000000*10/tmThd );
const char * sDesc = ( iRun==1 ) ? "dummy" : "mutex";
printf ( "run %d: %d %s threads in %d.%d msec; %d.%d thd/sec\n",
iRun, TOTAL_THREADS, sDesc,
(int)(tmThd/1000), (int)((tmThd%1000)/100),
iThdSec10/10, iThdSec10%10 );
}
SafeDeleteArray ( pThd );
}
//////////////////////////////////////////////////////////////////////////
void SortDataRepeat1245 ( DWORD * pData, int iCount )
{
const int dFill[4] = { 1, 2, 4, 5 };
for ( int i=0; i<iCount; i++ )
pData[i] = dFill[i%4];
}
void SortDataEnd0 ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = i+1;
pData[iCount-1] = 0;
}
void SortDataIdentical ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = 123;
}
void SortDataMed3Killer ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount/2; i++ )
pData[i] = 1 + i + ( i & 1 )*(iCount/2-1);
for ( int i=iCount/2; i<iCount; i++ )
pData[i] = 2*(i-iCount/2+1);
}
void SortDataMidKiller ( DWORD * pData, int iCount )
{
for ( int i=0; i<2; i++ )
for ( int j=0; j<iCount/2; j++ )
*pData++ = j*2+i;
}
void SortDataRandDupes ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = sphRand() % ( iCount/10 );
}
void SortDataRandUniq ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = i;
for ( int i=0; i<iCount; i++ )
Swap ( pData[i], pData[sphRand()%iCount] );
}
void SortDataRandSteps ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i+=100 )
{
int a = i;
int b = Min ( i+100, iCount );
for ( int j=a; j<b; j++ )
pData[j] = j;
for ( int j=a; j<b; j++ )
Swap ( pData[j], pData [ a + sphRand()%(b-a) ] );
}
}
void SortDataRevEnds ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = i;
Swap ( pData[0], pData[iCount-1] );
}
void SortDataRevPartial ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = iCount-i;
for ( int i=0; i<iCount/10; i++ )
Swap ( pData[sphRand()%iCount], pData[sphRand()%iCount] );
}
void SortDataRevSaw ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i+=100 )
{
int a = i;
int b = Min ( i+100, iCount );
for ( int j=a; j<b; j++ )
pData[j] = b-j;
}
}
void SortDataReverse ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = iCount-i;
}
void SortDataStart1000 ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = 1+i;
pData[0] = 1000;
}
void SortDataSeqPartial ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = 1+i;
for ( int i=0; i<iCount/10; i++ )
Swap ( pData[sphRand()%iCount], pData[sphRand()%iCount] );
}
void SortDataSeqSaw ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i+=100 )
{
int a = i;
int b = Min ( i+100, iCount );
for ( int j=a; j<b; j++ )
pData[j] = j-a+1;
}
}
void SortDataSeq ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = 1+i;
}
void SortDataAscDesc ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount/2; i++ )
pData[i] = 1+i;
for ( int i=iCount/2; i<iCount; i++ )
pData[i] = iCount-i;
}
void SortDataDescAsc ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount/2; i++ )
pData[i] = iCount/2-i;
for ( int i=iCount/2; i<iCount; i++ )
pData[i] = i-iCount/2+1;
}
void SortDataRand01 ( DWORD * pData, int iCount )
{
for ( int i=0; i<iCount; i++ )
pData[i] = ( sphRand()>>3 ) & 1;
}
typedef void (*SortDataGen_fn)( DWORD *, int );
struct SortDataGenDesc_t
{
SortDataGen_fn m_fnGen;
const char * m_sName;
};
static SortDataGenDesc_t g_dSortDataGens[] =
{
{ SortDataRepeat1245, "repeat1245" },
{ SortDataEnd0, "end0" },
{ SortDataIdentical, "identical" },
{ SortDataMed3Killer, "med3killer" },
{ SortDataMidKiller, "midkiller" },
{ SortDataRandDupes, "randdupes" },
{ SortDataRandUniq, "randuniq" },
{ SortDataRandSteps, "randsteps" },
{ SortDataRevEnds, "revends" },
{ SortDataRevPartial, "revpartial" },
{ SortDataRevSaw, "revsaw" },
{ SortDataReverse, "reverse" },
{ SortDataStart1000, "start1000" },
{ SortDataSeqPartial, "seqpartial" },
{ SortDataSeqSaw, "seqsaw" },
{ SortDataSeq, "sequential" },
{ SortDataAscDesc, "ascdesc" },
{ SortDataDescAsc, "descasc" },
{ SortDataRand01, "rand01" },
};
struct SortPayload_t
{
DWORD m_uKey;
DWORD m_uPayload[3];
bool operator < ( const SortPayload_t & rhs ) const
{
return m_uKey < rhs.m_uKey;
}
};
inline bool operator < ( const CSphWordHit & a, const CSphWordHit & b )
{
return
( a.m_uWordID<b.m_uWordID ||
( a.m_uWordID==b.m_uWordID && a.m_tRowID<b.m_tRowID ) ||
( a.m_uWordID==b.m_uWordID && a.m_tRowID==b.m_tRowID && a.m_uWordPos<b.m_uWordPos ) );
}
template < typename T >
int64_t BenchSort ( T * pData, int iCount, bool bCheck )
{
int64_t tmSort = sphMicroTimer();
sphSort ( pData, iCount );
tmSort = sphMicroTimer() - tmSort;
if ( bCheck )
{
for ( int i=0; i<iCount-1 && bCheck; i++ )
if ( pData[i+1] < pData[i] )
bCheck = false;
if ( !bCheck )
sphDie ( "sorting results check failed!" );
}
return tmSort;
}
void BenchSort ()
{
const int MINSIZE = 100;
const int MAXSIZE = 100000;
const int RUNS = 100;
FILE * fpRes = fopen ( "benchsort/res.csv", "w+" );
if ( !fpRes )
sphDie ( "failed to create benchsort/res.csv" );
fprintf ( fpRes, "test-name;runs-by-size;total-time\n" );
// bench synthetic payloads
DWORD * pKeys = new DWORD [ MAXSIZE ];
SortPayload_t * pValues = new SortPayload_t [ MAXSIZE ];
for ( int iGen=0; iGen<(int)(sizeof(g_dSortDataGens)/sizeof(g_dSortDataGens[0])); iGen++ )
for ( int iLen=MINSIZE; iLen<=MAXSIZE; iLen*=10 )
{
int64_t tmSort = 0;
for ( int iRun=0; iRun<RUNS; iRun++ )
{
g_dSortDataGens[iGen].m_fnGen ( pKeys, iLen );
for ( int i=0; i<iLen; i++ )
pValues[i].m_uKey = pKeys[i];
tmSort += BenchSort ( pValues, iLen, iRun==0 );
}
printf ( "%-12s 100x%-8d %d.%03d msec\n", g_dSortDataGens[iGen].m_sName, iLen, (int)(tmSort/1000), (int)(tmSort%1000) );
fprintf ( fpRes, "%s;100x%d;%d,%03d\n", g_dSortDataGens[iGen].m_sName, iLen, (int)(tmSort/1000), (int)(tmSort%1000) );
CSphString sFile;
sFile.SetSprintf ( "benchsort/%s.%d.txt", g_dSortDataGens[iGen].m_sName, iLen );
#if 0
FILE * fp = fopen ( sFile.cstr(), "w+" );
if ( fp )
{
g_dSortDataGens[iGen].m_fnGen ( pKeys, iLen );
for ( int i=0; i<iLen; i++ )
fprintf ( fp, "%d\n", pKeys[i] );
fclose ( fp );
}
#endif
}
SafeDeleteArray ( pKeys );
SafeDeleteArray ( pValues );
// bench real hits
const int MAXHITS = 10000000;
const char * sHits = "benchsort/hits.bin";
CSphWordHit * pHits = new CSphWordHit [ MAXHITS ];
int HITS = MAXHITS;
FILE * fp = fopen ( sHits, "rb+" );
if ( !fp )
sphDie ( "failed to open %s", sHits );
if ( (int)fread ( pHits, sizeof(CSphWordHit), HITS, fp )!=HITS )
sphDie ( "failed to read %s", sHits );
fclose ( fp );
int64_t tmSort = BenchSort ( pHits, HITS, true );
printf ( "%-12s 100x%-8d %d.%03d msec\n", "hits", HITS, (int)(tmSort/1000), (int)(tmSort%1000) );
fprintf ( fpRes, "%s;100x%d;%d,%03d\n", "hits", HITS, (int)(tmSort/1000), (int)(tmSort%1000) );
SafeDeleteArray ( pHits );
// owl down
fclose ( fpRes );
exit ( 0 );
}
//////////////////////////////////////////////////////////////////////////
static const char * CORPUS = "corpus.txt";
const int POOLSIZE = 80*1048576;
const int GAP = 4;
void BenchStemmer ()
{
CSphString sError;
#if SNOWBALL
SN_env * pSnow = english_ISO_8859_1_create_env();
#if 1
char test[] = "this";
SN_set_current ( pSnow, strlen(test), (const symbol *)test );
pSnow->p [ pSnow->l ] = 0;
english_ISO_8859_1_stem ( pSnow );
stem_en ( (BYTE*)test, strlen(test) );
#endif
#endif
#if PORTER1
struct stemmer * z = create_stemmer();
#endif
BYTE * pRaw = new BYTE [ POOLSIZE ];
FILE * fp = fopen ( CORPUS, "rb" );
if ( !fp )
sphDie ( "fopen %s failed", CORPUS );
auto iLen = (int) fread ( pRaw, 1, POOLSIZE, fp );
printf ( "read %d bytes\n", iLen );
fclose ( fp );
TokenizerRefPtr_c pTok { Tokenizer::Detail::CreateUTF8Tokenizer() };
if ( !pTok->SetCaseFolding ( "A..Z->a..z, a..z", sError ) )
sphDie ( "oops: %s", sError.cstr() );
pTok->SetBuffer ( pRaw, iLen );
BYTE * pTokens = new BYTE [ POOLSIZE ];
BYTE * p = pTokens;
BYTE * sTok;
int iToks = 0;
int iBytes = 0;
int iStemmed = 0;
while ( ( sTok = pTok->GetToken() )!=NULL )
{
BYTE * pStart = p++; // 1 byte for length
while ( *sTok )
*p++ = *sTok++;
*pStart = (BYTE)( p-pStart-1 ); // store length
for ( int i=0; i<GAP; i++ )
*p++ = '\0'; // trailing zero and a safety gap
if ( p>=pTokens+POOLSIZE )
sphDie ( "out of buffer at tok %d", iToks );
iToks++;
}
*p++ = '\0';
iBytes = (int)( p - pTokens );
printf ( "tokenized %d tokens\n", iToks );
#if 0
int dCharStats[256];
memset ( dCharStats, 0, sizeof(dCharStats) );
for ( BYTE * t = pTokens; t<pTokens+iBytes; t++ )
dCharStats[*t]++;
const char * sDump = "aeiouywxY";
for ( const char * s = sDump; *s; s++ )
printf ( "%c: %d\n", *s, dCharStats[*s] );
#endif
int64_t tmStem = sphMicroTimer();
p = pTokens;
iToks = 0;
int iDiff = 0;
while ( *p )
{
#if !SNOWBALL && !PORTER1
stem_en ( p+1, *p );
#endif
#if SNOWBALL
int iLen = *p;
SN_set_current ( pSnow, iLen, p+1 );
english_ISO_8859_1_stem ( pSnow );
#if !CROSSCHECK
// benchmark
memcpy ( p+1, pSnow->p, pSnow->l );
p[pSnow->l+1] = 0;
#else
// crosscheck
char buf[256];
memcpy ( buf, p+1, *p+1 );
stem_en ( p+1, *p );
int ll = strlen ( (char*)p+1 );
if ( ll!=pSnow->l || memcmp ( p+1, pSnow->p, ll ) )
{
pSnow->p[pSnow->l] = 0;
printf ( "%s[%d] vs %s[%d] for orig %s\n", p+1, ll, pSnow->p, pSnow->l, buf );
iDiff++;
}
#endif
#endif
#if PORTER1
p [ stem ( z, (char*)p+1, *p-1 )+2 ] = 0;
#endif
p += *p + GAP + 1;
iToks++;
}
tmStem = sphMicroTimer() - tmStem;
if ( iDiff )
printf ( "%d tokens are different\n", iDiff );
if ( iStemmed )
printf ( "%d data bytes stemmed\n", iStemmed );
#if SNOWBALL
english_ISO_8859_1_close_env ( pSnow );
#endif
uint64_t uHash = sphFNV64 ( pTokens, iBytes );
printf ( "stemmed %d tokens (%d bytes) in %d msec, hash %08x %08x\n",
iToks, iBytes, (int)(tmStem/1000),
(DWORD)( uHash>>32 ), (DWORD)( uHash & 0xffffffffUL ) );
if ( uHash!=U64C ( 0x54ef4f21994b67db ) )
printf ( "ERROR, HASH MISMATCH\n" );
SafeDeleteArray ( pRaw );
}
//////////////////////////////////////////////////////////////////////////
static CSphVector<int64_t> GetFilterEstimates ( const CSphQuery & tQuery, CSphIndex * pIndex )
{
assert ( pIndex );
auto pHistograms = pIndex->Debug_GetHistograms();
assert ( pHistograms );
CSphVector<int64_t> dRes;
for ( const auto & i : tQuery.m_dFilters )
{
auto pHistogram = pHistograms->Get ( i.m_sAttrName );
assert ( pHistogram );
HistogramRset_t tEstimate;
Verify ( pHistogram->EstimateRsetSize ( i, tEstimate ) );
dRes.Add ( tEstimate.m_iTotal );
}
return dRes;
}
static float GetEstimatedCost ( CSphQuery & tQuery, CSphIndex * pIndex, SecondaryIndexType_e eType )
{
auto tStats = pIndex->GetStats();
CSphVector<int64_t> dFilterRsetEstimates = GetFilterEstimates ( tQuery, pIndex );
CSphVector<SecondaryIndexInfo_t> dSIInfo ( dFilterRsetEstimates.GetLength() ); // mutated in the loop below, so it must not be const
ARRAY_FOREACH ( i, dSIInfo )
{
dSIInfo[i].m_iRsetEstimate = dFilterRsetEstimates[i];
dSIInfo[i].m_eType = eType;
}
int iCutoff = ApplyImplicitCutoff ( tQuery, {}, false );
SelectIteratorCtx_t tCtx ( tQuery, tQuery.m_dFilters, pIndex->GetMatchSchema(), pIndex->GetMatchSchema(), pIndex->Debug_GetHistograms(), pIndex->GetColumnar(), *(pIndex->Debug_GetSI()), iCutoff, tStats.m_iTotalDocuments, 1 );
int iNumIterators = dSIInfo.count_of ( []( auto & tSI ){ return tSI.m_eType==SecondaryIndexType_e::INDEX || tSI.m_eType==SecondaryIndexType_e::ANALYZER; } );
if ( iNumIterators > 1 )
iCutoff = -1;
std::unique_ptr<CostEstimate_i> pEstimate ( CreateCostEstimate ( dSIInfo, tCtx, iCutoff ) );
return pEstimate->CalcQueryCost();
}
static int RunGenericQuery ( CSphQuery & tQuery, CSphIndex * pIndex, int64_t & iTime, uint64_t & uHash )
{
auto pParser = sphCreatePlainQueryParser();
tQuery.m_pQueryParser = pParser.get();
AggrResult_t tResult;
CSphQueryResult tQueryResult;
tQueryResult.m_pMeta = &tResult;
CSphMultiQueryArgs tArgs ( 1 );
SphQueueSettings_t tQueueSettings ( pIndex->GetMatchSchema() );
tQueueSettings.m_bComputeItems = true;
SphQueueRes_t tRes;
CSphString sError;
std::unique_ptr<ISphMatchSorter> pSorter ( sphCreateQueue ( tQueueSettings, tQuery, sError, tRes ) );
if ( !pSorter )
sphDie ( "failed to create sorter; error=%s", tResult.m_sError.cstr() );
int64_t iLocalTime = sphMicroTimer();
auto pRawSorter = pSorter.get();
if ( !pIndex->MultiQuery ( tQueryResult, tQuery, { &pRawSorter, 1 }, tArgs ) )
sphDie ( "query failed; error=%s", pIndex->GetLastError().cstr() );
iTime += sphMicroTimer()-iLocalTime;
int64_t iTotal = pRawSorter->GetTotalCount();
auto & tOneRes = tResult.m_dResults.Add ();
tOneRes.FillFromSorter ( pRawSorter );
ARRAY_FOREACH ( i, tOneRes.m_dMatches )
{
auto tRowID = tOneRes.m_dMatches[i].m_tRowID;
uHash = sphFNV64 ( &tRowID, sizeof(tRowID), uHash );
}
return iTotal;
}
static int RunGenericQuery ( CSphQuery & tQuery, CSphIndex * pIndex, int64_t & iTime, uint64_t & uHash, int iRepeats )
{
iTime = 0;
int64_t iRsetItems = 0;
const int WARMUP_RUNS = 2;
for ( auto i = 0; i < iRepeats; i++ )
{
iRsetItems = RunGenericQuery ( tQuery, pIndex, iTime, uHash );
if ( i < WARMUP_RUNS )
iTime = 0;
}
// the first WARMUP_RUNS timings are discarded above; average over the rest
iTime /= ( iRepeats-WARMUP_RUNS );
return iRsetItems;
}
static void PrintStats ( const char * szHeader, int64_t iTime, float fEstimatedCost, uint64_t uHash )
{
printf ( "\n%s\n", szHeader );
printf ( "\t%d msec, estimated=%.3f\n", (int)( iTime/1000 ), fEstimatedCost );
printf ( "\thash=" UINT64_FMT "\n", uHash );
}
static void IgnoreAll ( CSphQuery & tQuery, const char * szName )
{
tQuery.m_dIndexHints.Add ( { szName, SecondaryIndexType_e::INDEX, false } );
tQuery.m_dIndexHints.Add ( { szName, SecondaryIndexType_e::ANALYZER, false } );
tQuery.m_dIndexHints.Add ( { szName, SecondaryIndexType_e::LOOKUP, false } );
}
static void ForceColumnar ( CSphQuery & tQuery, const char * szName )
{
tQuery.m_dIndexHints.Add ( { szName, SecondaryIndexType_e::ANALYZER, true } );
}
static void ForceSI( CSphQuery & tQuery, const char * szName )
{
tQuery.m_dIndexHints.Add ( { szName, SecondaryIndexType_e::INDEX, true } );
}
static void ForceLookup( CSphQuery & tQuery, const char * szName )
{
tQuery.m_dIndexHints.Add ( { szName, SecondaryIndexType_e::LOOKUP, true } );
}
static CSphQuery CreateFullscanQuery ()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
return tQuery;
}
static CSphQuery CreateFullscanQuery1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMaxValue = 5;
tFilter.m_bOpenLeft = true;
}
IgnoreAll ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQuery2()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMinValue = 1000;
tFilter.m_bOpenRight = true;
}
IgnoreAll ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQuery3()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMinValue = 50;
tFilter.m_bOpenRight = true;
}
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "story_comment_count";
tFilter.m_iMinValue = 50;
tFilter.m_bOpenRight = true;
}
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "author_comment_count";
tFilter.m_iMinValue = 500;
tFilter.m_bOpenRight = true;
}
IgnoreAll ( tQuery, "comment_ranking" );
IgnoreAll ( tQuery, "story_comment_count" );
IgnoreAll ( tQuery, "author_comment_count" );
return tQuery;
}
static CSphQuery CreateFullscanQuery4()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMaxValue = 5;
tFilter.m_bOpenLeft = true;
}
tQuery.m_sSortBy = "story_comment_count asc";
IgnoreAll ( tQuery, "comment_ranking" );
return tQuery;
}
static void CalcCoeffsInit()
{
CSphString sError;
if ( !sphInitCharsetAliasTable(sError) )
sphDie ( "failed to init charset alias table: %s", sError.cstr() );
if ( !InitColumnar(sError) )
sphDie ( "failed to init MCL: %s", sError.cstr() );
if ( !InitSecondary ( sError ) )
sphDie ( "failed to init SI: %s", sError.cstr() );
}
static const float SCALE = 1000.0f;
static void CalcCoeffsRowwise()
{
CSphString sError, sWarning;
if ( !sphInitCharsetAliasTable ( sError ) )
sphDie ( "failed to init charset alias table: %s", sError.cstr() );
CSphString sPath = "hackernews_10x";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
auto tStats = pIndex->GetStats();
int64_t iTotalDocs = tStats.m_iTotalDocuments;
float fCostOfPush = 0.0f;
float fCostOfFilter = 0.0f;
const int REPEATS = 20;
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fCost = float(iTime) / iRsetItems*SCALE;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::FILTER );
fCostOfPush = fCost;
PrintStats ( "Fullscan, 0 filters", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_PUSH=%.3f\n", fCost );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery1();
// calculate the cost of single filter
// run a query that doesn't early reject any blocks of attributes
// FIXME! depends on the dataset
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fTotalCost = float(iTime)*SCALE;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::FILTER );
fCostOfFilter = ( fTotalCost - fCostOfPush*iRsetItems )/( float(iTotalDocs)/DOCINFO_INDEX_FREQ + iTotalDocs );
PrintStats ( "Fullscan, 1 non-selective filter", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_FILTER=%.3f\n", fCostOfFilter );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery2();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::FILTER );
PrintStats ( "Fullscan, 1 selective filter", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery3();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::FILTER );
PrintStats ( "Fullscan, 3 filters", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery4();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::FILTER );
PrintStats ( "Fullscan, 1 non-selective filter and sorting by 1 attribute", iTime, fEstimatedCost, uHash );
}
}
static CSphQuery CreateFullscanQueryC1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"comment_ranking","comment_ranking"} );
tQuery.m_sSortBy = "story_comment_count asc";
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMaxValue = 0;
tFilter.m_bOpenLeft = true;
}
ForceColumnar ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQueryC1b()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "story_comment_count";
tFilter.m_iMinValue = 1000;
tFilter.m_bOpenRight = true;
}
ForceColumnar ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQueryC2()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMinValue = 0;
tFilter.m_bOpenRight = true;
}
ForceColumnar ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQueryC3()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMinValue = 50;
tFilter.m_bOpenRight = true;
}
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "story_comment_count";
tFilter.m_iMinValue = 50;
tFilter.m_bOpenRight = true;
}
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "author_comment_count";
tFilter.m_iMinValue = 500;
tFilter.m_bOpenRight = true;
}
ForceColumnar ( tQuery, "comment_ranking" );
ForceColumnar ( tQuery, "story_comment_count" );
ForceColumnar ( tQuery, "author_comment_count" );
return tQuery;
}
class RowIdIteratorStub_c : public RowidIterator_i
{
public:
RowIdIteratorStub_c()
{
// rand()*N overflows int on platforms with a large RAND_MAX; clamp the
// multiplicand to the classic 15-bit range to keep the original intent
// (a random-sized list of spread-out rowids with duplicates)
m_dValues.Resize ( ( rand() % 32768 ) * 150 );
for ( auto & i : m_dValues )
i = ( rand() % 32768 ) * 100 + ( rand() % 100 );
m_dValues.Uniq();
}
bool HintRowID ( RowID_t tRowID ) override { return true; }
int64_t GetNumProcessed() const override { return 0; }
void SetCutoff ( int iCutoff ) override {}
bool WasCutoffHit() const override { return false; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override {}
int GetNumValues() const { return m_dValues.GetLength(); }
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override
{
if ( m_iOffset>=m_dValues.GetLength() )
return false;
RowID_t * pStart = m_dValues.Begin() + m_iOffset;
RowID_t * pEnd = Min ( pStart + OFFSET_STEP, m_dValues.End() );
m_iOffset += OFFSET_STEP;
return ReturnIteratorResult ( pEnd, pStart, dRowIdBlock );
}
private:
CSphVector<RowID_t> m_dValues;
int m_iOffset = 0;
static const int OFFSET_STEP = 128;
};
static int64_t RunIntersectIterator ( int64_t & iTotalValues, uint64_t & uHash )
{
srand(0);
int64_t iTotalTime = 0;
const int REPEATS = 10;
for ( int i = 0; i < REPEATS; i++ )
{
const int NUM_ITERATORS = 3;
CSphVector<RowidIterator_i *> dIterators(NUM_ITERATORS);
for ( auto & i : dIterators )
{
auto pStub = new RowIdIteratorStub_c;
iTotalValues += pStub->GetNumValues();
i = pStub;
}
int64_t iTime = sphMicroTimer();
std::unique_ptr<RowidIterator_i> pIterator ( CreateIteratorIntersect ( dIterators, nullptr ) );
RowIdBlock_t dRowIdBlock;
while ( pIterator->GetNextRowIdBlock ( dRowIdBlock ) )
for ( auto i : dRowIdBlock )
uHash = sphFNV64 ( &i, sizeof(i), uHash );
iTotalTime += sphMicroTimer() - iTime;
}
return iTotalTime;
}
static void CalcCoeffsColumnar()
{
CSphString sPath = "hackernews_col10x";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
const int REPEATS = 20;
float fCostOfPush = 0.0f;
// pure scan; should be the same speed as rowwise
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
fCostOfPush = float(iTime) / iRsetItems*SCALE;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, 0 filters", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC1();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, 1 selective filter", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC1b();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, 1 extremely selective filter", iTime, fEstimatedCost, uHash );
}
// determine analyzer read cost
// scan with sorting by 1 attribute
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC2();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fTotalCost = float(iTime)*SCALE;
float fCostOfFilter = ( fTotalCost - fCostOfPush*iRsetItems )/iRsetItems;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, 1 filter", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_COLUMNAR_FILTER=%.3f\n", fCostOfFilter );
}
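	// The back-solve above assumes a simple linear cost model (a sketch, not an
	// exact accounting): total_time ~= N_rset * ( COST_PUSH + COST_FILTER ),
	// with COST_PUSH measured by the pure-scan case, hence
	//   COST_FILTER = total_time*SCALE/N_rset - COST_PUSH
	// which is exactly what fCostOfFilter computes from fTotalCost above.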
{
uint64_t uHash = SPH_FNV64_SEED;
int64_t iTotalValues = 0;
int64_t iTime = RunIntersectIterator ( iTotalValues, uHash );
float fCostPerValue = float(iTime) / float(iTotalValues)*SCALE;
printf ( "\nIntersect iterator\n" );
printf ( "\tCOST_INTERSECT=%.3f\n", fCostPerValue );
printf ( "\thash=" UINT64_FMT "\n", uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC3();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, 3 filters", iTime, fEstimatedCost, uHash );
}
}
static CSphQuery CreateFullscanQueryC2_1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_sAttrName = "a_mva";
tFilter.m_dValues.Add(2652701560);
tFilter.m_dValues.Add(3800433230);
tFilter.m_eMvaFunc = SPH_MVAFUNC_ANY;
}
ForceColumnar ( tQuery, "a_mva" );
return tQuery;
}
static CSphQuery CreateFullscanQueryC2_2()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "a_int";
tFilter.m_iMinValue = 100;
tFilter.m_bOpenRight = true;
tFilter.m_eMvaFunc = SPH_MVAFUNC_ANY;
}
ForceColumnar ( tQuery, "a_int" );
return tQuery;
}
static void CalcCoeffsColumnar2()
{
CSphString sPath = "synth";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC2_1();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, 100 );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, mva filter of 2 values", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC2_2();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, 100 );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, nonselective int filter; generic encoding", iTime, fEstimatedCost, uHash );
}
}
static CSphQuery CreateFullscanQueryC3_1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_STRING;
tFilter.m_sAttrName = "pickup_boroname";
tFilter.m_dStrings.Add("0");
tFilter.m_bExclude = true;
}
ForceColumnar ( tQuery, "pickup_boroname" );
return tQuery;
}
static CSphQuery CreateFullscanQueryC3_2()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "id";
tFilter.m_iMinValue = 1000;
tFilter.m_bOpenRight = true;
}
ForceColumnar ( tQuery, "id" );
return tQuery;
}
static CSphQuery CreateFullscanQueryC3_3()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_STRING;
tFilter.m_sAttrName = "pickup_ntaname";
tFilter.m_dStrings.Add("Upper West Side");
tFilter.m_bExclude = true;
}
ForceColumnar ( tQuery, "pickup_ntaname" );
return tQuery;
}
static void CalcCoeffsColumnar3()
{
CSphString sPath = "taxi1";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( "taxi1", sPath.cstr() );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC3_1();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, 10 );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, string(hash) filter of 1 value; table encoding", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC3_2();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, 10 );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, int range filter; generic encoding", iTime, fEstimatedCost, uHash );
}
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryC3_3();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, 10 );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::ANALYZER );
PrintStats ( "Columnar fullscan, string(hash) filter of 1 value; table encoding", iTime, fEstimatedCost, uHash );
}
}
static CSphQuery CreateFullscanQueryS1 ( int iValue )
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_dValues.Add(iValue);
tFilter.m_bOpenRight = true;
}
ForceSI ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQueryS2()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMinValue = 0;
tFilter.m_iMaxValue = 20;
tFilter.m_bHasEqualMin = true;
tFilter.m_bHasEqualMax = false;
}
ForceSI ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQueryS3 ( int iMin, int iMax )
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMinValue = iMin;
tFilter.m_iMaxValue = iMax;
tFilter.m_bHasEqualMin = true;
tFilter.m_bHasEqualMax = false;
}
ForceSI ( tQuery, "comment_ranking" );
return tQuery;
}
static CSphQuery CreateFullscanQueryS4()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "comment_ranking";
tFilter.m_iMaxValue = 10;
tFilter.m_bOpenLeft = true;
}
ForceSI ( tQuery, "comment_ranking" );
return tQuery;
}
float g_fSICostOfPush = 0.0f;
// internal value; must be in sync with costestimate.cpp
const float COST_INDEX_ITERATOR_INIT = 200.0f;
static void CalcCoeffsSI()
{
CSphString sPath = "hackernews_10x";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
float fCostOfIndexReadSingle = 0.0f;
float fCostOfIndexReadDense = 0.0f;
// pure scan; should be the same speed as rowwise/columnar
{
const int REPEATS = 20;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
g_fSICostOfPush = float(iTime) / iRsetItems*SCALE;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, 0 filters", iTime, fEstimatedCost, uHash );
}
{
const int REPEATS = 200;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS1(10);
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fTotalCost = float(iTime)*SCALE;
fCostOfIndexReadSingle = fTotalCost/iRsetItems - g_fSICostOfPush;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, 1 non-selective filter of 1 value", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_INDEX_READ_SINGLE=%.3f\n", fCostOfIndexReadSingle );
}
{
const int REPEATS = 50;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS4();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
// we have 11 iterators and 500k rset values
const int NUM_ITERATORS=11;
float fTotalCost = float(iTime)*SCALE - COST_INDEX_ITERATOR_INIT*NUM_ITERATORS;
fCostOfIndexReadDense = fTotalCost/iRsetItems - g_fSICostOfPush;
PrintStats ( "SI fullscan, range filter of 500k values", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_INDEX_READ_DENSE=%.3f\n", fCostOfIndexReadDense );
}
{
const int REPEATS = 200;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS1(500);
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, 1 selective filter of 1 value", iTime, fEstimatedCost, uHash );
}
{
const int REPEATS = 50;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS2();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, range filter of 20 values (bitmap union)", iTime, fEstimatedCost, uHash );
}
{
const int REPEATS = 50;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS3 ( 0, 5 );
int64_t iRangeValues = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
float fTotalCostWUnion = float(iTime)*SCALE;
float fTotalCostWoUnion = iRangeValues * ( fCostOfIndexReadSingle + g_fSICostOfPush );
float fUnionCost = ( fTotalCostWUnion-fTotalCostWoUnion );
float fNLogN = iRangeValues*log2f(iRangeValues);
float fCoeff = fNLogN/fUnionCost;
PrintStats ( "SI fullscan, range filter of 5 values (queue union)", iTime, fEstimatedCost, uHash );
printf ( "\tunion coeff=%.3f, values=%d\n", fCoeff, (int)iRangeValues );
}
{
const int REPEATS = 50;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS3 ( 25, 30 );
int64_t iRangeValues = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
float fTotalCostWUnion = float(iTime)*SCALE;
float fTotalCostWoUnion = iRangeValues * ( fCostOfIndexReadSingle + g_fSICostOfPush );
float fUnionCost = ( fTotalCostWUnion-fTotalCostWoUnion );
float fNLogN = iRangeValues*log2f(iRangeValues);
float fCoeff = fNLogN/fUnionCost;
PrintStats ( "SI fullscan, range filter of 5 values (queue union)", iTime, fEstimatedCost, uHash );
printf ( "\tunion coeff=%.3f, values=%d\n", fCoeff, (int)iRangeValues );
}
{
const int REPEATS = 50;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS3 ( 50, 55 );
int64_t iRangeValues = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
float fTotalCostWUnion = float(iTime)*SCALE;
float fTotalCostWoUnion = iRangeValues * ( fCostOfIndexReadSingle + g_fSICostOfPush );
float fUnionCost = ( fTotalCostWUnion-fTotalCostWoUnion );
float fNLogN = iRangeValues*log2f(iRangeValues);
float fCoeff = fNLogN/fUnionCost;
PrintStats ( "SI fullscan, range filter of 5 values (queue union)", iTime, fEstimatedCost, uHash );
printf ( "\tunion coeff=%.3f, values=%d\n", fCoeff, (int)iRangeValues );
}
}
static CSphQuery CreateFullscanQueryS2_1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "a_big_super_lc";
tFilter.m_iMinValue = 1000000000;
tFilter.m_bOpenRight = true;
}
ForceSI ( tQuery, "a_big_super_lc" );
return tQuery;
}
static CSphQuery CreateFullscanQueryS2_2()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "a_big";
tFilter.m_iMinValue = 1000000000;
tFilter.m_bOpenRight = true;
}
ForceSI ( tQuery, "a_big" );
return tQuery;
}
static void CalcCoeffsSI2()
{
CSphString sPath = "synth";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
float fCostOfIndexRead = 0.0f;
{
const int REPEATS = 10;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS2_1();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
// we have about 5k iterators and 500k rset values
// so each iterator fetches ~100 values
// index read speed here is a lot slower
float fTotalCost = float(iTime)*SCALE - COST_INDEX_ITERATOR_INIT*5000;
fCostOfIndexRead = fTotalCost/iRsetItems - g_fSICostOfPush;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, 1 non-selective filter of 1 value", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_INDEX_READ_SPARSE=%.3f\n", fCostOfIndexRead );
}
{
const int REPEATS = 10;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS2_2();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
// we have about ~500k iterators and ~500k rset values in this query
// we don't have direct access to SI so we just hardcode it
const int NUM_ITERATORS = 500000;
float fCostOfIndexInit = ( float(iTime)*SCALE - ( g_fSICostOfPush + fCostOfIndexRead )*iRsetItems ) / NUM_ITERATORS;
PrintStats ( "SI fullscan, 500k iterators", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_INDEX_ITERATOR_INIT=%.3f\n", fCostOfIndexInit );
}
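	// Back-solving the per-iterator setup cost above is again a linear-model
	// sketch: total ~= NUM_ITERATORS*COST_INIT + N_rset*( COST_PUSH + COST_READ ),
	// so COST_INIT = ( total*SCALE - ( COST_PUSH + COST_READ )*N_rset ) / NUM_ITERATORS.
	// NUM_ITERATORS=500000 is a hardcoded estimate, as there is no direct access
	// to the SI iterator count here.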
}
static CSphQuery CreateFullscanQueryS3_1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_sAttrName = "a_mva";
tFilter.m_dValues.Add(2652701560);
tFilter.m_dValues.Add(3800433230);
tFilter.m_eMvaFunc = SPH_MVAFUNC_ANY;
}
ForceSI ( tQuery, "a_mva" );
return tQuery;
}
static void CalcCoeffsSI3()
{
// this is supposed to be columnar synth
CSphString sPath = "synth";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
{
const int REPEATS = 1000;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS3_1();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, any(mva) of 2 values", iTime, fEstimatedCost, uHash );
}
}
static CSphQuery CreateFullscanQueryS4_1()
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_STRING;
tFilter.m_sAttrName = "pickup_ntaname";
tFilter.m_dStrings.Add("Upper West Side");
}
ForceSI ( tQuery, "pickup_ntaname" );
return tQuery;
}
static void CalcCoeffsSI4()
{
CSphString sPath = "taxi6";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( "taxi", sPath.cstr() );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
{
const int REPEATS = 100;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryS4_1();
RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::INDEX );
PrintStats ( "SI fullscan, string filter of 1 value", iTime, fEstimatedCost, uHash );
}
}
static CSphQuery CreateFullscanQueryL1 ( int iMin, int iMax )
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_dItems.Add ( {"id","id"} );
{
auto & tFilter = tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "id";
tFilter.m_iMinValue = iMin;
tFilter.m_iMaxValue = iMax;
tFilter.m_bHasEqualMin = true;
tFilter.m_bHasEqualMax = false;
}
ForceLookup ( tQuery, "id" );
return tQuery;
}
static void CalcCoeffsLookup()
{
CSphString sPath = "hackernews_10x";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
float fCostOfPush = 0.0f;
// pure scan; should be the same speed as any other
{
const int REPEATS = 20;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery();
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
fCostOfPush = float(iTime) / iRsetItems*SCALE;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::LOOKUP );
PrintStats ( "Lookup fullscan, 0 filters", iTime, fEstimatedCost, uHash );
}
{
const int REPEATS = 200;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQueryL1(1,200000);
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::LOOKUP );
float fCostOfLookupRead = float(iTime) / iRsetItems*SCALE - fCostOfPush;
PrintStats ( "Lookup fullscan, 1 range filter of 200k values", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_LOOKUP_READ=%.3f\n", fCostOfLookupRead );
printf ( "\tvalues=%d\n", (int)iRsetItems );
}
}
static CSphQuery CreateFTTermQuery ( ESphRankMode eRanker, const char * szTerm )
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_sQuery = szTerm;
tQuery.m_eRanker = eRanker;
return tQuery;
}
static CSphQuery CreateFTQueryOR ( ESphRankMode eRanker )
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_sQuery = "hello | world";
tQuery.m_eRanker = eRanker;
return tQuery;
}
static CSphQuery CreateFTQueryAND ( ESphRankMode eRanker )
{
CSphQuery tQuery;
tQuery.m_iCutoff = 0;
tQuery.m_sQuery = "hello world";
tQuery.m_eRanker = eRanker;
return tQuery;
}
static void CalcCoeffsFT()
{
CSphString sPath = "hn";
std::unique_ptr<CSphIndex> pIndex = sphCreateIndexPhrase ( sPath, sPath );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->Preread();
float fCostOfPush = 0.0f;
{
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFullscanQuery();
const int REPEATS = 20;
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fCost = float(iTime) / iRsetItems*SCALE;
float fEstimatedCost = GetEstimatedCost ( tQuery, pIndex.get(), SecondaryIndexType_e::FILTER );
fCostOfPush = fCost;
PrintStats ( "Fullscan, 0 filters", iTime, fEstimatedCost, uHash );
printf ( "\tCOST_PUSH=%.3f\n", fCost );
}
float fCostOfRankerNone = 0.0f;
int64_t iRawTimeHello = 0;
{
const int REPEATS = 100;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFTTermQuery ( SPH_RANK_NONE, "hello" );
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
fCostOfRankerNone = float(iTime) / iRsetItems*SCALE - fCostOfPush;
iRawTimeHello = iTime - int64_t(fCostOfPush/SCALE*iRsetItems);
PrintStats ( "FT term, ranker=none", iTime, 0.0f, uHash );
printf ( "\tCOST_FT_TERM=%.3f\n", fCostOfRankerNone );
}
int64_t iRawTimeWorld = 0;
{
const int REPEATS = 100;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFTTermQuery ( SPH_RANK_NONE, "world" );
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
fCostOfRankerNone = float(iTime) / iRsetItems*SCALE - fCostOfPush;
iRawTimeWorld = iTime - int64_t(fCostOfPush/SCALE*iRsetItems);
PrintStats ( "FT term, ranker=none", iTime, 0.0f, uHash );
printf ( "\tCOST_FT_TERM=%.3f\n", fCostOfRankerNone );
}
{
const int REPEATS = 100;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFTTermQuery ( SPH_RANK_DEFAULT, "hello" );
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fCost = float(iTime) / iRsetItems*SCALE - fCostOfRankerNone;
PrintStats ( "FT term, ranker=default", iTime, 0.0f, uHash );
printf ( "\tCOST_RANKER_DEFAULT=%.3f\n", fCost );
}
{
const int REPEATS = 100;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFTTermQuery ( SPH_RANK_PROXIMITY, "hello" );
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fCost = float(iTime) / iRsetItems*SCALE - fCostOfRankerNone;
PrintStats ( "FT term, ranker=proximity", iTime, 0.0f, uHash );
printf ( "\tCOST_RANKER_PROXIMITY=%.3f\n", fCost );
}
{
const int REPEATS = 100;
int64_t iTime = 0;
uint64_t uHash = SPH_FNV64_SEED;
CSphQuery tQuery = CreateFTQueryOR ( SPH_RANK_NONE );
int64_t iRsetItems = RunGenericQuery ( tQuery, pIndex.get(), iTime, uHash, REPEATS );
float fCost = float(iTime - iRawTimeHello - iRawTimeWorld ) / iRsetItems*SCALE;
PrintStats ( "OR query, ranker=none", iTime, 0.0f, uHash );
printf ( "\tCOST_QUERY_OR=%.3f\n", fCost );
}
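	// Isolating the OR-merge overhead above is a rough estimate: the raw
	// per-term times (push cost already subtracted) are removed from the OR
	// query time, and the remainder is attributed to merging the two posting
	// streams, normalized per result-set item.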
}
//////////////////////////////////////////////////////////////////////////
// BLOODY DIRTY HACK!!!
// definitions of AggrResult_t members just copy-pasted 'as is' from searchdaemon.cpp
int AggrResult_t::GetLength () const
{
int iCount = 0;
m_dResults.Apply ( [&iCount] ( const OneResultset_t & a ) { iCount += a.m_dMatches.GetLength (); } );
return iCount;
}
void AggrResult_t::ClampMatches ( int iLimit )
{
assert ( m_bSingle );
if ( !m_dResults.IsEmpty () )
m_dResults.First ().ClampMatches ( iLimit );
}
void AggrResult_t::ClampAllMatches ()
{
for ( auto& dResult : m_dResults )
dResult.ClampAllMatches();
}
int OneResultset_t::FillFromSorter ( ISphMatchSorter * pQueue )
{
if ( !pQueue )
return 0;
assert ( m_dMatches.IsEmpty () );
m_tSchema = *pQueue->GetSchema ();
if ( !pQueue->GetLength () )
return 0;
int iCopied = pQueue->Flatten ( m_dMatches.AddN ( pQueue->GetLength () ) );
m_dMatches.Resize ( iCopied );
return iCopied;
}
void OneResultset_t::ClampAllMatches ()
{
for ( auto& dMatch : m_dMatches )
m_tSchema.FreeDataPtrs ( dMatch );
m_dMatches.Reset();
}
void OneResultset_t::ClampMatches ( int iLimit )
{
assert ( iLimit>0 );
int iMatches = m_dMatches.GetLength ();
for ( int i = iLimit; i<iMatches; ++i )
m_tSchema.FreeDataPtrs ( m_dMatches[i] );
m_dMatches.Resize ( iLimit );
}
OneResultset_t::~OneResultset_t()
{
for ( auto & dMatch : m_dMatches )
m_tSchema.FreeDataPtrs ( dMatch );
}
//////////////////////////////////////////////////////////////////////////
int main ()
{
// threads should be initialized before memory allocations
char cTopOfMainStack;
Threads::Init();
Threads::PrepareMainThread ( &cTopOfMainStack );
setvbuf ( stdout, NULL, _IONBF, 0 );
#if _WIN32
SetProcessAffinityMask ( GetCurrentProcess(), 1 );
#endif
printf ( "RUNNING INTERNAL LIBSPHINX TESTS\n\n" );
#if 0
	SetMaxChildrenThreads(1);
	WipeGlobalSchedulerOnShutdownAndFork();
	StartGlobalWorkPool();
	Threads::CallCoroutine ( [&]
	{
		CalcCoeffsInit();
		CalcCoeffsRowwise();
		CalcCoeffsColumnar();
		CalcCoeffsColumnar2();
		CalcCoeffsColumnar3();
		CalcCoeffsSI();
		CalcCoeffsSI2();
		CalcCoeffsSI3();
		CalcCoeffsSI4();
		CalcCoeffsLookup();
		CalcCoeffsFT();
	} );
#endif
printf ( "\nSUCCESS\n" );
return 0;
}
manticoresoftware_manticoresearch/src/sphinxaot.cpp
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2011-2016, Andrew Aksyonoff
// Copyright (c) 2011-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
// Based on AOT lemmatizer, http://aot.ru/
// Copyright (c) 2004-2014, Alexey Sokirko and others
//
#include "sphinxint.h"
#include "fileutils.h"
#include "sphinxstem.h"
#include "sphinxplugin.h"
#include "coroutine.h"
#include "tokenizer/token_filter.h"
#include "dict/word_forms.h"
//////////////////////////////////////////////////////////////////////////
// LEMMATIZER
//////////////////////////////////////////////////////////////////////////
const BYTE AOT_POS_UNKNOWN = 0xff;
const int AOT_MIN_PREDICTION_SUFFIX = 3;
const BYTE AOT_MORPH_ANNOT_CHAR = '+';
const int AOT_MAX_ALPHABET_SIZE = 54;
const DWORD AOT_NOFORM = 0xffffffffUL;
const DWORD AOT_ORIGFORM = 0xfffffffeUL;
static int g_iCacheSize = 262144; // in bytes, so 256K
#define AOT_MODEL_NO(_a) ((_a)>>18)
#define AOT_ITEM_NO(_a) (((_a)&0x3FFFF)>>9)
#define AOT_PREFIX_NO(_a) ((_a)&0x1FF)
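// A packing sketch with illustrative values (not taken from a real dictionary):
// an annotation DWORD packs three indices as ( model<<18 | item<<9 | prefix ),
// so model=5, item=3, prefix=1 encodes to (5<<18)+(3<<9)+1 = 1312257 and the
// macros above recover the parts again. PredictPack() below builds its results
// in the same layout, leaving the prefix part zero.
static_assert ( AOT_MODEL_NO ( (5<<18) + (3<<9) + 1 )==5, "packing sketch: model" );
static_assert ( AOT_ITEM_NO ( (5<<18) + (3<<9) + 1 )==3, "packing sketch: item" );
static_assert ( AOT_PREFIX_NO ( (5<<18) + (3<<9) + 1 )==1, "packing sketch: prefix" );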
/// morphological form info
struct CMorphForm
{
BYTE m_FlexiaLen;
BYTE m_PrefixLen;
BYTE m_POS;
BYTE m_Dummy;
char m_Prefix[4];
char m_Flexia[24];
};
/// alphabet descriptor
struct AlphabetDesc_t
{
int m_iSize;
BYTE m_dCode2Alpha [ AOT_MAX_ALPHABET_SIZE ];
BYTE m_dCode2AlphaWA [ AOT_MAX_ALPHABET_SIZE ];
};
/// alphabet codec
class CABCEncoder : public ISphNoncopyable
{
public:
int m_AlphabetSize;
int m_Alphabet2Code[256];
int m_Alphabet2CodeWithoutAnnotator[256];
void InitAlphabet ( const AlphabetDesc_t & tDesc );
bool CheckABCWithoutAnnotator ( const BYTE * pWord ) const;
DWORD DecodeFromAlphabet ( const BYTE * sPath, int iPath ) const;
};
/// morphology automaton node, 1:31
/// 1 bit for "final or not" flag
/// 31 bits for index to relations (pointer to the first child)
struct CMorphAutomNode
{
DWORD m_Data;
DWORD GetChildrenStart() const { return m_Data&(0x80000000-1); }
bool IsFinal() const { return (m_Data&0x80000000) > 0; }
};
/// morphology automaton relation, 8:24
/// 8 bits for relational char (aka next char in current form)
/// 24 bits for index to nodes (pointer to the next level node)
struct CMorphAutomRelation
{
DWORD m_Data;
DWORD GetChildNo() const { return m_Data & 0xffffff; }
BYTE GetRelationalChar() const { return (BYTE)(m_Data>>24); }
};
/// morphology automaton
class CMorphAutomat : public CABCEncoder
{
protected:
CMorphAutomNode * m_pNodes;
int m_NodesCount;
CMorphAutomRelation * m_pRelations;
int m_RelationsCount;
int m_iCacheSize;
CSphTightVector<int> m_ChildrenCache;
void BuildChildrenCache ( int iCacheSize );
int FindStringAndPassAnnotChar ( const BYTE * pText ) const;
public:
CMorphAutomat ()
: m_pNodes ( NULL )
, m_NodesCount ( 0 )
, m_pRelations ( NULL )
, m_RelationsCount ( 0 )
, m_iCacheSize ( 0 )
{}
~CMorphAutomat ()
{
SafeDeleteArray ( m_pNodes );
SafeDeleteArray ( m_pRelations );
}
int GetChildrenCount ( int i ) const { return m_pNodes[i+1].GetChildrenStart() - m_pNodes[i].GetChildrenStart(); }
const CMorphAutomRelation * GetChildren ( int i ) const { return m_pRelations + m_pNodes[i].GetChildrenStart(); }
const CMorphAutomNode GetNode ( int i ) const { return m_pNodes[i]; }
public:
bool LoadPak ( CSphReader & rd, int iCacheSize );
void GetInnerMorphInfos ( const BYTE * pText, DWORD * Infos ) const;
int NextNode ( int NodeNo, BYTE Child ) const;
};
/// prediction data tuple
struct CPredictTuple
{
WORD m_ItemNo;
DWORD m_LemmaInfoNo;
BYTE m_PartOfSpeechNo;
};
/// flexia model is basically a vector of morphology forms
/// (there is other meta stuff like per-model comments but that is now stripped)
typedef CSphVector<CMorphForm> CFlexiaModel;
/// lemmatizer
class CLemmatizer
{
protected:
static constexpr int MAX_PREFIX_LEN = 12;
static constexpr bool m_bMaximalPrediction = false;
bool m_bIsGerman;
BYTE m_UC[256];
CMorphAutomat m_FormAutomat;
CSphVector<WORD> m_LemmaFlexiaModel; ///< lemma id to flexia model id mapping
CSphVector<BYTE> m_NPSs;
int m_PrefixLen [ MAX_PREFIX_LEN ];
CSphVector<BYTE> m_PrefixBlob;
CMorphAutomat m_SuffixAutomat;
CSphVector<DWORD> m_ModelFreq;
bool IsPrefix ( const BYTE * sPrefix, int iLen ) const;
DWORD PredictPack ( const CPredictTuple & t ) const { return ( m_LemmaFlexiaModel [ t.m_LemmaInfoNo ]<<18 ) + ( t.m_ItemNo<<9 ); }
bool PredictFind ( const BYTE * pWord, int iLen, CSphVector<CPredictTuple> & res ) const;
void PredictFindRecursive ( int r, BYTE * sPath, int iPath, CSphVector<CPredictTuple> & Infos ) const;
void PredictByDataBase ( const BYTE * pWord, int iLen, DWORD * results, bool is_cap ) const;
public:
explicit CLemmatizer ( bool IsGerman = false )
: m_bIsGerman ( IsGerman )
, m_iLang ( 0 )
{}
CSphVector<CFlexiaModel> m_FlexiaModels; ///< flexia models
int m_iLang; ///< my language
bool LemmatizeWord ( BYTE * pWord, DWORD * results ) const;
bool LoadPak ( CSphReader & rd );
};
//////////////////////////////////////////////////////////////////////////
DWORD CABCEncoder::DecodeFromAlphabet ( const BYTE * sPath, int iPath ) const
{
DWORD c = 1;
DWORD Result = 0;
for ( const BYTE * sMax = sPath+iPath; sPath<sMax; sPath++ )
{
Result += m_Alphabet2CodeWithoutAnnotator[*sPath] * c;
c *= m_AlphabetSize - 1;
}
return Result;
}
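// DecodeFromAlphabet() reads the path as a little-endian number in base
// ( m_AlphabetSize - 1 ), using the annotator-free codes. A toy example with an
// assumed 4-letter alphabet (so base 3): a path whose bytes map to codes {2, 1}
// decodes to 2*1 + 1*3 = 5. The automaton stores packed lemma/item/POS numbers
// along its paths in exactly this form.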
bool CABCEncoder::CheckABCWithoutAnnotator ( const BYTE * pWord ) const
{
while ( *pWord )
if ( m_Alphabet2CodeWithoutAnnotator [ *pWord++ ]==-1 )
return false;
return true;
}
void CABCEncoder::InitAlphabet ( const AlphabetDesc_t & tDesc )
{
m_AlphabetSize = tDesc.m_iSize;
for ( int i=0; i<256; i++ )
{
m_Alphabet2Code[i] = -1;
m_Alphabet2CodeWithoutAnnotator[i] = -1;
}
for ( int i=0; i<m_AlphabetSize; i++ )
m_Alphabet2Code [ tDesc.m_dCode2Alpha[i] ] = i;
for ( int i=0; i<m_AlphabetSize-1; i++ )
m_Alphabet2CodeWithoutAnnotator [ tDesc.m_dCode2AlphaWA[i] ] = i;
}
//////////////////////////////////////////////////////////////////////////
void CMorphAutomat::BuildChildrenCache ( int iCacheSize )
{
iCacheSize /= AOT_MAX_ALPHABET_SIZE*4;
iCacheSize = Max ( iCacheSize, 0 );
m_iCacheSize = Min ( m_NodesCount, iCacheSize );
m_ChildrenCache.Resize ( m_iCacheSize*AOT_MAX_ALPHABET_SIZE );
m_ChildrenCache.Fill ( -1 );
for ( int NodeNo=0; NodeNo<m_iCacheSize; NodeNo++ )
{
const CMorphAutomRelation * pStart = m_pRelations + m_pNodes [ NodeNo ].GetChildrenStart();
const CMorphAutomRelation * pEnd = pStart + GetChildrenCount ( NodeNo );
for ( ; pStart!=pEnd; pStart++ )
{
const CMorphAutomRelation & p = *pStart;
m_ChildrenCache [ NodeNo*AOT_MAX_ALPHABET_SIZE + m_Alphabet2Code [ p.GetRelationalChar() ] ] = p.GetChildNo();
}
}
}
bool CMorphAutomat::LoadPak ( CSphReader & rd, int iCacheSize )
{
rd.Tag ( "automaton-nodes" );
m_NodesCount = rd.UnzipInt();
m_pNodes = new CMorphAutomNode [ m_NodesCount+1 ];
rd.GetBytes ( m_pNodes, m_NodesCount*sizeof(CMorphAutomNode) );
rd.Tag ( "automaton-relations" );
m_RelationsCount = rd.UnzipInt();
m_pRelations = new CMorphAutomRelation [ m_RelationsCount ];
rd.GetBytes ( m_pRelations, m_RelationsCount*sizeof(CMorphAutomRelation) );
if ( rd.GetErrorFlag() )
return false;
m_pNodes [ m_NodesCount ].m_Data = m_RelationsCount;
#if !USE_LITTLE_ENDIAN
for ( int i=0; i< m_NodesCount; ++i )
FlipEndianness ( &m_pNodes[i].m_Data );
for ( int i=0; i< m_RelationsCount; ++i )
FlipEndianness ( &m_pRelations[i].m_Data );
#endif
BuildChildrenCache ( iCacheSize );
return true;
}
int CMorphAutomat::NextNode ( int NodeNo, BYTE RelationChar ) const
{
if ( NodeNo<m_iCacheSize )
{
int z = m_Alphabet2Code [ RelationChar ];
if ( z==-1 )
return -1;
return m_ChildrenCache [ NodeNo*AOT_MAX_ALPHABET_SIZE + z ];
} else
{
const CMorphAutomRelation * pStart = m_pRelations + m_pNodes [ NodeNo ].GetChildrenStart();
const CMorphAutomRelation * pEnd = pStart + GetChildrenCount ( NodeNo );
for ( ; pStart!=pEnd; pStart++ )
{
const CMorphAutomRelation & p = *pStart;
if ( RelationChar==p.GetRelationalChar() )
return p.GetChildNo();
}
return -1;
}
}
int CMorphAutomat::FindStringAndPassAnnotChar ( const BYTE * pText ) const
{
int r = 0;
while ( *pText )
{
int nd = NextNode ( r, *pText++ );
if ( nd==-1 )
return -1;
r = nd;
}
return NextNode ( r, AOT_MORPH_ANNOT_CHAR ); // passing annotation char
}
void CMorphAutomat::GetInnerMorphInfos ( const BYTE * pText, DWORD * Infos ) const
{
*Infos = AOT_NOFORM;
int r = FindStringAndPassAnnotChar ( pText );
if ( r==-1 )
return;
// recursively get all interpretations
const int MAX_DEPTH = 32;
int iLevel = 0;
BYTE sPath[MAX_DEPTH];
int iChild[MAX_DEPTH];
int iChildMax[MAX_DEPTH];
iChild[0] = m_pNodes[r].GetChildrenStart();
iChildMax[0] = m_pNodes[r+1].GetChildrenStart();
while ( iLevel>=0 )
{
while ( iChild[iLevel]<iChildMax[iLevel] )
{
CMorphAutomRelation Rel = m_pRelations[iChild[iLevel]];
int NodeNo = Rel.GetChildNo();
sPath[iLevel] = Rel.GetRelationalChar();
iChild[iLevel]++;
if ( m_pNodes[NodeNo].IsFinal() )
{
*Infos++ = DecodeFromAlphabet ( sPath, iLevel+1 );
} else
{
iLevel++;
assert ( iLevel<MAX_DEPTH );
iChild[iLevel] = m_pNodes[NodeNo].GetChildrenStart();
iChildMax[iLevel] = m_pNodes[NodeNo+1].GetChildrenStart();
}
}
iLevel--;
}
*Infos = AOT_NOFORM;
}
//////////////////////////////////////////////////////////////////////////
void CLemmatizer::PredictFindRecursive ( int NodeNo, BYTE * sPath, int iPath, CSphVector<CPredictTuple> & Infos ) const
{
const CMorphAutomNode & N = m_SuffixAutomat.GetNode ( NodeNo );
if ( N.IsFinal() )
{
int i = 0;
while ( i<iPath && sPath[i]!=AOT_MORPH_ANNOT_CHAR )
i++;
int j = i+1;
while ( j<iPath && sPath[j]!=AOT_MORPH_ANNOT_CHAR )
j++;
int k = j+1;
while ( k<iPath && sPath[k]!=AOT_MORPH_ANNOT_CHAR )
k++;
CPredictTuple & A = Infos.Add();
A.m_PartOfSpeechNo = (BYTE) m_SuffixAutomat.DecodeFromAlphabet ( sPath+i+1, j-i-1 );
A.m_LemmaInfoNo = m_SuffixAutomat.DecodeFromAlphabet ( sPath+j+1, k-j-1 );
A.m_ItemNo = (WORD) m_SuffixAutomat.DecodeFromAlphabet ( sPath+k+1, iPath-k-1 );
}
int Count = m_SuffixAutomat.GetChildrenCount ( NodeNo );
for ( int i=0; i<Count; i++ )
{
const CMorphAutomRelation & p = m_SuffixAutomat.GetChildren ( NodeNo )[i];
sPath[iPath] = p.GetRelationalChar();
PredictFindRecursive ( p.GetChildNo(), sPath, iPath+1, Infos );
}
}
bool CLemmatizer::PredictFind ( const BYTE * pWord, int iLen, CSphVector<CPredictTuple> & res ) const
{
// FIXME? we might not want to predict words with annot char inside
// was: if (ReversedWordForm.find(AnnotChar) != string::npos) return false;
int r = 0;
int i = 0;
const BYTE * p = pWord + iLen;
for ( ; i<iLen; i++ )
{
int nd = m_SuffixAutomat.NextNode ( r, *--p );
if ( nd==-1 )
break;
r = nd;
}
	// no prediction by a suffix shorter than 3 chars
if ( i<AOT_MIN_PREDICTION_SUFFIX )
return false;
assert ( r!=-1 );
BYTE sPath[128] = {0};
PredictFindRecursive ( r, sPath, 0, res );
return true;
}
bool CLemmatizer::IsPrefix ( const BYTE * sPrefix, int iLen ) const
{
// empty prefix is a prefix
if ( !iLen )
return true;
if ( iLen>=MAX_PREFIX_LEN || m_PrefixLen[iLen]<0 )
return false;
const BYTE * p = &m_PrefixBlob [ m_PrefixLen[iLen] ];
while ( *p==iLen )
{
if ( !memcmp ( p+1, sPrefix, iLen ) )
return true;
p += 1+iLen;
}
return false;
}
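// m_PrefixBlob stores the known prefixes grouped by length as consecutive
// [len][len bytes] records, and m_PrefixLen[iLen] holds the offset of the first
// record of that length (negative if there are none). The scan in IsPrefix()
// therefore walks records while the stored length byte still equals iLen.
// Illustrative blob for prefixes "do" and "pre": 02 'd' 'o' 03 'p' 'r' 'e';
// a length-2 lookup stops as soon as it reaches the 03 byte.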
/// returns true if matched in dictionary, false if predicted
bool CLemmatizer::LemmatizeWord ( BYTE * pWord, DWORD * results ) const
{
constexpr bool bCap = false; // maybe when we manage to drag this all the way from tokenizer
constexpr bool bPredict = true;
// uppercase (and maybe other translations), check, and compute length
BYTE * p;
if ( m_iLang==AOT_RU )
{
for ( p = pWord; *p; p++ )
{
BYTE b = m_UC[*p];
// russian chars are in 0xC0..0xDF range
// avoid lemmatizing words with other chars in them
if ( ( b>>5 )!=6 )
{
*results = AOT_NOFORM;
return false;
}
// uppercase
*p = b;
}
	} else ///< use the alphabet table to reject words with other letters
{
for ( p = pWord; *p; p++ )
{
BYTE b = m_UC[*p];
// english chars are in 0x61..0x7A range
// avoid lemmatizing words with other chars in them
if ( m_FormAutomat.m_Alphabet2CodeWithoutAnnotator[b]<0 )
{
*results = AOT_NOFORM;
return false;
}
// uppercase
*p = b;
}
}
int iLen = (int)( p-pWord );
// do dictionary lookup
m_FormAutomat.GetInnerMorphInfos ( pWord, results );
if ( *results!=AOT_NOFORM )
return true;
if_const ( !bPredict )
return false;
// attempt prediction by keyword suffix
// find the longest suffix that finds dictionary results
// require that suffix to be 4+ chars too
int iSuffix;
for ( iSuffix=1; iSuffix<=iLen-4; iSuffix++ )
{
m_FormAutomat.GetInnerMorphInfos ( pWord+iSuffix, results );
if ( *results!=AOT_NOFORM )
break;
}
	// cancel suffix predictions when there is no hyphen, the matched
	// suffix is short enough, and the cut-off prefix is unknown
if ( pWord [ iSuffix-1 ]!='-'
&& ( iLen-iSuffix )<6
&& !IsPrefix ( pWord, iSuffix ) )
{
*results = AOT_NOFORM;
}
// cancel predictions by pronouns, eg [Sem'ykin'ym]
for ( DWORD * pRes=results; *pRes!=AOT_NOFORM; pRes++ )
if ( m_NPSs[ AOT_MODEL_NO ( *pRes ) ]==AOT_POS_UNKNOWN )
{
*results = AOT_NOFORM;
break;
}
// what, still no results?
if ( *results==AOT_NOFORM )
{
// attempt prediction by database
PredictByDataBase ( pWord, iLen, results, bCap );
// filter out too short flexias
DWORD * s = results;
DWORD * d = s;
while ( *s!=AOT_NOFORM )
{
const CMorphForm & F = m_FlexiaModels [ AOT_MODEL_NO(*s) ][ AOT_ITEM_NO(*s) ];
if ( F.m_FlexiaLen<iLen )
*d++ = *s;
s++;
}
*d = AOT_NOFORM;
}
return false;
}
void CLemmatizer::PredictByDataBase ( const BYTE * pWord, int iLen, DWORD * FindResults, bool is_cap ) const
{
// FIXME? handle all-consonant abbreviations anyway?
// was: if ( CheckAbbreviation ( InputWordStr, FindResults, is_cap ) ) return;
assert ( *FindResults==AOT_NOFORM );
DWORD * pOut = FindResults;
CSphVector<CPredictTuple> res;
// if the ABC is wrong this prediction yields too many variants
if ( m_FormAutomat.CheckABCWithoutAnnotator ( pWord ) )
PredictFind ( pWord, iLen, res );
	// assume no more than 32 different POS
int has_nps[32];
for ( int i=0; i<32; i++ )
has_nps[i] = -1;
for ( const auto& tPredict : res )
{
BYTE PartOfSpeechNo = tPredict.m_PartOfSpeechNo;
if ( !m_bMaximalPrediction && has_nps[PartOfSpeechNo]!=-1 )
{
int iOldFreq = m_ModelFreq [ AOT_MODEL_NO ( FindResults[has_nps[PartOfSpeechNo]] ) ];
int iNewFreq = m_ModelFreq [ m_LemmaFlexiaModel [tPredict.m_LemmaInfoNo ] ];
if ( iOldFreq < iNewFreq )
FindResults [ has_nps [ PartOfSpeechNo ] ] = PredictPack ( tPredict );
continue;
}
has_nps [ PartOfSpeechNo ] = (int)( pOut-FindResults );
*pOut++ = PredictPack ( tPredict );
*pOut = AOT_NOFORM;
}
if ( has_nps[0]==-1 // no noun
|| ( is_cap && !m_bIsGerman ) ) // or can be a proper noun (except German, where all nouns are written uppercase)
{
static BYTE CriticalNounLetterPack[4] = "+++";
PredictFind ( CriticalNounLetterPack, AOT_MIN_PREDICTION_SUFFIX, res );
*pOut++ = PredictPack ( res.Last() );
*pOut = AOT_NOFORM;
}
}
bool CLemmatizer::LoadPak ( CSphReader & rd )
{
rd.Tag ( "sphinx-aot" );
int iVer = rd.UnzipInt();
if ( iVer!=1 )
return false;
rd.Tag ( "alphabet-desc" );
AlphabetDesc_t tDesc;
tDesc.m_iSize = rd.UnzipInt();
rd.GetBytes ( tDesc.m_dCode2Alpha, tDesc.m_iSize );
rd.GetBytes ( tDesc.m_dCode2AlphaWA, tDesc.m_iSize );
m_FormAutomat.InitAlphabet ( tDesc );
m_SuffixAutomat.InitAlphabet ( tDesc );
rd.Tag ( "uc-table" );
rd.GetBytes ( m_UC, 256 );
// caching forms can help a lot (from 4% with 256K cache to 13% with 110M cache)
rd.Tag ( "forms-automaton" );
m_FormAutomat.LoadPak ( rd, g_iCacheSize );
rd.Tag ( "flexia-models" );
m_FlexiaModels.Resize ( rd.UnzipInt() );
ARRAY_FOREACH ( i, m_FlexiaModels )
{
m_FlexiaModels[i].Resize ( rd.UnzipInt() );
ARRAY_FOREACH ( j, m_FlexiaModels[i] )
{
CMorphForm & F = m_FlexiaModels[i][j];
F.m_FlexiaLen = (BYTE) rd.GetByte();
rd.GetBytes ( F.m_Flexia, F.m_FlexiaLen );
F.m_PrefixLen = (BYTE) rd.GetByte();
rd.GetBytes ( F.m_Prefix, F.m_PrefixLen );
F.m_POS = (BYTE) rd.GetByte();
assert ( F.m_FlexiaLen<sizeof(F.m_Flexia) );
assert ( F.m_PrefixLen<sizeof(F.m_Prefix) );
F.m_Flexia[F.m_FlexiaLen] = 0;
F.m_Prefix[F.m_PrefixLen] = 0;
}
}
rd.Tag ( "prefixes" );
for ( int i=0; i<MAX_PREFIX_LEN; i++ )
m_PrefixLen[i] = rd.UnzipInt();
m_PrefixBlob.Resize ( rd.UnzipInt() );
rd.GetBytes ( m_PrefixBlob.Begin(), m_PrefixBlob.GetLength() );
rd.Tag ( "lemma-flexia-models" );
m_LemmaFlexiaModel.Resize ( rd.UnzipInt() );
ARRAY_FOREACH ( i, m_LemmaFlexiaModel )
m_LemmaFlexiaModel[i] = (WORD) rd.UnzipInt();
// build model freqs
m_ModelFreq.Resize ( m_FlexiaModels.GetLength() );
m_ModelFreq.Fill ( 0 );
ARRAY_FOREACH ( i, m_LemmaFlexiaModel )
m_ModelFreq [ m_LemmaFlexiaModel[i] ]++;
rd.Tag ( "nps-vector" );
m_NPSs.Resize ( rd.UnzipInt() );
rd.GetBytes ( m_NPSs.Begin(), m_NPSs.GetLength() );
// caching predictions does not measurably affect performance though
rd.Tag ( "prediction-automaton" );
m_SuffixAutomat.LoadPak ( rd, 0 );
rd.Tag ( "eof" );
return !rd.GetErrorFlag();
}
//////////////////////////////////////////////////////////////////////////
// SPHINX MORPHOLOGY INTERFACE
//////////////////////////////////////////////////////////////////////////
const char* AOT_LANGUAGES[AOT_LENGTH] = {"ru", "en", "de", "uk" };
static CLemmatizer * g_pLemmatizers[AOT_LENGTH] = {0};
static CSphNamedInt g_tDictinfos[AOT_LENGTH];
void sphAotSetCacheSize ( int iCacheSize )
{
g_iCacheSize = Max ( iCacheSize, 0 );
}
static bool LoadLemmatizerUk ( CSphString & sError );
bool AotInit ( const CSphString & sDictFile, CSphString & sError, int iLang )
{
if ( g_pLemmatizers[iLang] )
return true;
if ( iLang==AOT_UK )
return LoadLemmatizerUk ( sError );
CSphAutofile rdFile;
if ( rdFile.Open ( sDictFile, SPH_O_READ, sError )<0 )
return false;
g_pLemmatizers[iLang] = new CLemmatizer ( iLang==AOT_DE );
g_pLemmatizers[iLang]->m_iLang = iLang;
CSphReader rd;
rd.SetFile ( rdFile );
if ( !g_pLemmatizers[iLang]->LoadPak(rd) )
{
sError.SetSprintf ( "failed to load lemmatizer dictionary: %s", rd.GetErrorMessage().cstr() );
SafeDelete ( g_pLemmatizers[iLang] );
return false;
}
// track dictionary crc
DWORD uCrc;
if ( !sphCalcFileCRC32 ( sDictFile.cstr(), uCrc ) )
{
sError.SetSprintf ( "failed to crc32 lemmatizer dictionary %s", sDictFile.cstr() );
SafeDelete ( g_pLemmatizers[iLang] );
return false;
}
// extract basename
const char * a = sDictFile.cstr();
const char * b = a + strlen(a) - 1;
while ( b>a && b[-1]!='/' && b[-1]!='\\' )
b--;
g_tDictinfos[iLang] = { b, (int) uCrc };
return true;
}
bool sphAotInit ( const CSphString & sDictFile, CSphString & sError, int iLang )
{
return AotInit ( sDictFile, sError, iLang );
}
static inline bool IsAlpha1251 ( BYTE c )
{
return ( c>=0xC0 || c==0xA8 || c==0xB8 );
}
static inline bool IsGermanAlpha1252 ( BYTE c )
{
if ( c==0xb5 || c==0xdf )
return true;
BYTE lc = c | 0x20;
switch ( lc )
{
case 0xe2:
case 0xe4:
case 0xe7:
case 0xe8:
case 0xe9:
case 0xea:
case 0xf1:
case 0xf4:
case 0xf6:
case 0xfb:
case 0xfc:
return true;
default:
return ( lc>0x60 && lc<0x7b );
}
}
static inline bool IsAlphaAscii ( BYTE c )
{
BYTE lc = c | 0x20;
return ( lc>0x60 && lc<0x7b );
}
enum EMMITERS {EMIT_1BYTE, EMIT_UTF8RU, EMIT_UTF8};
template < EMMITERS >
inline BYTE * Emit ( BYTE * sOut, BYTE uChar )
{
if ( uChar=='-' )
return sOut;
*sOut++ = uChar | 0x20;
return sOut;
}
template<>
inline BYTE * Emit<EMIT_UTF8RU> ( BYTE * sOut, BYTE uChar )
{
if ( uChar=='-' )
return sOut;
assert ( uChar!=0xA8 && uChar!=0xB8 ); // no country for yo
uChar |= 0x20; // lowercase, E0..FF range now
if ( uChar & 0x10 )
{
// F0..FF -> D1 80..D1 8F
*sOut++ = 0xD1;
*sOut++ = uChar - 0x70;
} else
{
// E0..EF -> D0 B0..D0 BF
*sOut++ = 0xD0;
*sOut++ = uChar - 0x30;
}
return sOut;
}
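// A worked example of the arithmetic above: CP1251 0xE6 (lowercase 'zhe',
// U+0436) has bit 0x10 clear, so we emit 0xD0, 0xE6-0x30 = 0xB6, ie. UTF-8
// D0 B6; CP1251 0xF0 ('er', U+0440) has bit 0x10 set and becomes 0xD1,
// 0xF0-0x70 = 0x80, ie. UTF-8 D1 80.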
template<>
inline BYTE * Emit<EMIT_UTF8> ( BYTE * sOut, BYTE uChar )
{
if ( uChar=='-' )
return sOut;
if ( uChar!=0xDF ) // don't touch 'ss' umlaut
uChar |= 0x20;
if ( uChar & 0x80 )
{
*sOut++ = 0xC0 | (uChar>>6);
*sOut++ = 0x80 | (uChar&0x3F); // NOLINT
} else
*sOut++ = uChar;
return sOut;
}
template < EMMITERS IS_UTF8 >
inline void CreateLemma ( BYTE * sOut, const BYTE * sBase, int iBaseLen, bool bFound, const CFlexiaModel & M, const CMorphForm & F )
{
// cut the form prefix
int PrefixLen = F.m_PrefixLen;
if ( bFound || strncmp ( (const char*)sBase, F.m_Prefix, PrefixLen )==0 )
{
sBase += PrefixLen;
iBaseLen -= PrefixLen;
}
// FIXME! maybe handle these lemma wide prefixes too?
#if 0
const string & LemmPrefix = m_pParent->m_Prefixes[m_InnerAnnot.m_PrefixNo];
if ( m_bFound
|| (
( m_InputWordBase.substr ( 0, LemmPrefix.length() )==LemmPrefix ) &&
( m_InputWordBase.substr ( LemmPrefix.length(), F.m_PrefixStr.length() )==F.m_PrefixStr ) ) )
{
m_InputWordBase.erase ( 0, LemmPrefix.length()+ M.m_PrefixStr.length() );
m_bPrefixesWereCut = true;
}
#endif
// cut the form suffix and append the lemma suffix
// UNLESS this was a predicted form, and form suffix does not fully match!
// eg. word=GUBARIEVICHA, flexion=IEIVICHA, so this is not really a matching lemma
int iSuff = F.m_FlexiaLen;
if ( bFound || ( iBaseLen>=iSuff && strncmp ( (const char*)sBase+iBaseLen-iSuff, F.m_Flexia, iSuff )==0 ) )
{
// ok, found and/or suffix matches, the usual route
int iCodePoints = 0;
iBaseLen -= iSuff;
while ( iBaseLen-- && iCodePoints<SPH_MAX_WORD_LEN )
{
sOut = Emit<IS_UTF8> ( sOut, *sBase++ );
iCodePoints++;
}
int iLemmaSuff = M[0].m_FlexiaLen;
const char * sFlexia = M[0].m_Flexia;
while ( iLemmaSuff-- && iCodePoints<SPH_MAX_WORD_LEN ) // OPTIMIZE? can remove len here
{
sOut = Emit<IS_UTF8> ( sOut, *sFlexia++ );
iCodePoints++;
}
} else
{
// whoops, no suffix match, just copy and lowercase the current base
while ( iBaseLen-- )
sOut = Emit<IS_UTF8> ( sOut, *sBase++ );
}
*sOut = '\0';
}
static inline bool IsRuFreq2 ( BYTE * pWord )
{
if ( pWord[2]!=0 )
return false;
int iCode = ( ( pWord[0]<<8 ) + pWord[1] ) | 0x2020;
switch ( iCode )
{
case 0xEDE0: // na
case 0xEFEE: // po
case 0xEDE5: // ne
case 0xEEF2: // ot
case 0xE7E0: // za
case 0xEEE1: // ob
case 0xE4EE: // do
case 0xF1EE: // so
case 0xE8E7: // iz
case 0xE8F5: // ih
case 0xF8F2: // sht
case 0xF3EB: // ul
return true;
}
return false;
}
static inline bool IsEnFreq2 ( BYTE * )
{
// stub
return false;
}
static inline bool IsDeFreq2 ( BYTE * )
{
// stub
return false;
}
static inline bool IsRuFreq3 ( BYTE * pWord )
{
if ( pWord[3]!=0 )
return false;
int iCode = ( ( pWord[0]<<16 ) + ( pWord[1]<<8 ) + pWord[2] ) | 0x202020;
return ( iCode==0xE8EBE8 || iCode==0xE4EBFF || iCode==0xEFF0E8 // ili, dlya, pri
|| iCode==0xE3EEE4 || iCode==0xF7F2EE || iCode==0xE1E5E7 ); // god, chto, bez
}
static inline bool IsRuNeed2 ( BYTE * pWord )
{
if ( pWord[2]!=0 )
return false;
int iCode = ( ( pWord[0]<<8 ) + ( pWord[1] ) ) | 0x2020;
return ( iCode==0xECEC || iCode==0xF1EC || iCode==0xEAEC || iCode==0xEAE3 ); // mm, cm, km, kg
}
static inline bool IsEnFreq3 ( BYTE * )
{
// stub
return false;
}
static inline bool IsDeFreq3 ( BYTE * )
{
// stub
return false;
}
void sphAotLemmatizeRu1251 ( BYTE * pWord, int iLen )
{
// i must be initialized
assert ( g_pLemmatizers[AOT_RU] );
// pass-through 1-char words, and non-Russian words
if ( !IsAlpha1251(*pWord) || !pWord[1] )
return;
// handle a few most frequent 2-char, 3-char pass-through words
if ( iLen==2 && IsRuFreq2 ( pWord ))
return;
if ( iLen==3 && IsRuFreq3 ( pWord ))
return;
// do lemmatizing
// input keyword moves into sForm; LemmatizeWord() will also case fold sForm
// we will generate results using sForm into pWord; so we need this extra copy
BYTE sForm[MAX_KEYWORD_BYTES];
int iFormLen = 0;
// faster than strlen and strcpy..
for ( BYTE * p=pWord; *p; )
sForm[iFormLen++] = *p++;
sForm[iFormLen] = '\0';
DWORD FindResults[12]; // max results is like 6
bool bFound = g_pLemmatizers[AOT_RU]->LemmatizeWord ( (BYTE*)sForm, FindResults );
if ( FindResults[0]==AOT_NOFORM )
return;
// pick a single form
	// picks a noun if possible, otherwise the lexicographically smallest form
bool bNoun = false;
for ( int i=0; FindResults[i]!=AOT_NOFORM; i++ )
{
const CFlexiaModel & M = g_pLemmatizers[AOT_RU]->m_FlexiaModels [ AOT_MODEL_NO ( FindResults[i] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( FindResults[i] ) ];
bool bNewNoun = ( F.m_POS==0 );
if ( i==0 || ( !bNoun && bNewNoun ) )
{
CreateLemma<EMIT_1BYTE> ( pWord, sForm, iFormLen, bFound, M, F );
bNoun = bNewNoun;
} else if ( bNoun==bNewNoun )
{
BYTE sBuf[256];
CreateLemma<EMIT_1BYTE> ( sBuf, sForm, iFormLen, bFound, M, F );
if ( strcmp ( (char*)sBuf, (char*)pWord )<0 )
strcpy ( (char*)pWord, (char*)sBuf ); // NOLINT
}
}
}
void sphAotLemmatize ( BYTE * pWord, int iLang )
{
// i must be initialized
assert ( g_pLemmatizers[iLang] );
	// pass-through 1-char words, and words with non-ASCII letters
if ( !IsAlphaAscii(*pWord) || !pWord[1] )
return;
// handle a few most frequent 2-char, 3-char pass-through words
if ( iLang==AOT_EN && ( IsEnFreq2(pWord) || IsEnFreq3(pWord) ) )
return;
if ( iLang==AOT_DE && ( IsDeFreq2(pWord) || IsDeFreq3(pWord) ) )
return;
// do lemmatizing
// input keyword moves into sForm; LemmatizeWord() will also case fold sForm
// we will generate results using sForm into pWord; so we need this extra copy
BYTE sForm[MAX_KEYWORD_BYTES];
int iFormLen = 0;
// faster than strlen and strcpy..
for ( BYTE * p=pWord; *p; )
sForm[iFormLen++] = *p++;
sForm[iFormLen] = '\0';
// do nothing with one-char words
if ( iFormLen<=1 )
return;
DWORD FindResults[12]; // max results is like 6
bool bFound = g_pLemmatizers[iLang]->LemmatizeWord ( (BYTE*)sForm, FindResults );
if ( FindResults[0]==AOT_NOFORM )
return;
// pick a single form
	// picks a noun if possible, otherwise the lexicographically smallest form
bool bNoun = false;
for ( int i=0; FindResults[i]!=AOT_NOFORM; i++ )
{
const CFlexiaModel & M = g_pLemmatizers[iLang]->m_FlexiaModels [ AOT_MODEL_NO ( FindResults[i] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( FindResults[i] ) ];
bool bNewNoun = ( F.m_POS==0 );
if ( i==0 || ( !bNoun && bNewNoun ) )
{
CreateLemma<EMIT_1BYTE> ( pWord, sForm, iFormLen, bFound, M, F );
bNoun = bNewNoun;
} else if ( bNoun==bNewNoun )
{
BYTE sBuf[256];
CreateLemma<EMIT_1BYTE> ( sBuf, sForm, iFormLen, bFound, M, F );
if ( strcmp ( (char*)sBuf, (char*)pWord )<0 )
strcpy ( (char*)pWord, (char*)sBuf ); // NOLINT
}
}
}
static inline bool IsRussianAlphaUtf8 ( const BYTE * pWord )
{
// letters, windows-1251, utf-8
// A..YA, C0..DF, D0 90..D0 AF
// a..p, E0..EF, D0 B0..D0 BF
// r..ya, F0..FF, D1 80..D1 8F
// YO, A8, D0 81
// yo, B8, D1 91
if ( pWord[0]==0xD0 )
if ( pWord[1]==0x81 || ( pWord[1]>=0x90 && pWord[1]<0xC0 ) )
return true;
if ( pWord[0]==0xD1 )
if ( pWord[1]>=0x80 && pWord[1]<=0x91 && pWord[1]!=0x90 )
return true;
return false;
}
void sphAotLemmatizeDe1252 ( BYTE * pWord, int iLen )
{
// i must be initialized
assert ( g_pLemmatizers[AOT_DE] );
// pass-through 1-char words, and non-German words
if ( !IsGermanAlpha1252(*pWord) || !pWord[1] )
return;
// handle a few most frequent 2-char, 3-char pass-through words
if ( iLen==2 && IsDeFreq2 ( pWord ))
return;
if ( iLen==3 && IsDeFreq3 ( pWord ))
return;
// do lemmatizing
// input keyword moves into sForm; LemmatizeWord() will also case fold sForm
// we will generate results using sForm into pWord; so we need this extra copy
BYTE sForm[MAX_KEYWORD_BYTES];
int iFormLen = 0;
// faster than strlen and strcpy..
for ( BYTE * p=pWord; *p; )
sForm[iFormLen++] = *p++;
sForm[iFormLen] = '\0';
DWORD FindResults[12]; // max results is like 6
bool bFound = g_pLemmatizers[AOT_DE]->LemmatizeWord ( (BYTE*)sForm, FindResults );
if ( FindResults[0]==AOT_NOFORM )
return;
// pick a single form
	// picks a noun if possible, otherwise the lexicographically smallest form
bool bNoun = false;
for ( int i=0; FindResults[i]!=AOT_NOFORM; i++ )
{
const CFlexiaModel & M = g_pLemmatizers[AOT_DE]->m_FlexiaModels [ AOT_MODEL_NO ( FindResults[i] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( FindResults[i] ) ];
bool bNewNoun = ( F.m_POS==0 );
if ( i==0 || ( !bNoun && bNewNoun ) )
{
CreateLemma<EMIT_1BYTE> ( pWord, sForm, iFormLen, bFound, M, F );
bNoun = bNewNoun;
} else if ( bNoun==bNewNoun )
{
BYTE sBuf[256];
CreateLemma<EMIT_1BYTE> ( sBuf, sForm, iFormLen, bFound, M, F );
if ( strcmp ( (char*)sBuf, (char*)pWord )<0 )
strcpy ( (char*)pWord, (char*)sBuf ); // NOLINT
}
}
}
/// returns length in bytes (aka chars) if all letters were russian and converted
/// returns 0 and aborts early if non-russian letters are encountered
static inline int Utf8ToWin1251 ( BYTE * pOut, const BYTE * pWord )
{
// YO, win A8, utf D0 81
// A..YA, win C0..DF, utf D0 90..D0 AF
// a..p, win E0..EF, utf D0 B0..D0 BF
// r..ya, win F0..FF, utf D1 80..D1 8F
// yo, win B8, utf D1 91
static const BYTE dTable[128] =
{
0, 0xa8, 0, 0, 0, 0, 0, 0, // 00
0, 0, 0, 0, 0, 0, 0, 0, // 08
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, // 10
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, // 18
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, // 20
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, // 28
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, // 30
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, // 38
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, // 40
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, // 48
0, 0xb8, 0, 0, 0, 0, 0, 0, // 50
0, 0, 0, 0, 0, 0, 0, 0, // 58
0, 0, 0, 0, 0, 0, 0, 0, // 60
0, 0, 0, 0, 0, 0, 0, 0, // 68
0, 0, 0, 0, 0, 0, 0, 0, // 70
0, 0, 0, 0, 0, 0, 0, 0 // 78
};
BYTE * pStart = pOut;
while ( *pWord )
{
// russian utf-8 letters begin with either D0 or D1
// and any valid 2nd utf-8 byte must be in 80..BF range
if ( ( *pWord & 0xFE )!=0xD0 )
return 0;
assert ( pWord[1]>=0x80 && pWord[1]<0xC0 );
// table index D0 80..BF to 0..3F, and D1 80..BF to 40..7F
BYTE uWin = dTable [ ( pWord[1] & 0x7F ) + ( ( pWord[0] & 1 )<<6 ) ];
pWord += 2;
if ( !uWin )
return 0;
*pOut++ = uWin;
}
*pOut = '\0';
return (int)( pOut-pStart );
}
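// Index arithmetic example for the table above: UTF-8 D0 B6 ('zhe') yields
// ( 0xB6 & 0x7F ) + ( ( 0xD0 & 1 )<<6 ) = 0x36 and dTable[0x36]==0xE6, the
// CP1251 code; D1 80 yields 0x40 and dTable[0x40]==0xF0. Any byte pair that
// maps to zero (punctuation, non-Russian Cyrillic) aborts the conversion.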
/// returns length in bytes (aka chars) if all letters were converted
/// returns 0 and aborts early if non-western letters are encountered
static inline int Utf8ToWin1252 ( BYTE * pOut, const BYTE * pWord )
{
BYTE * pStart = pOut;
while ( *pWord )
{
if ( (*pWord)&0x80 )
{
if ( ((*pWord)&0xFC)==0xC0 )
{
*pOut++ = ( pWord[1] & 0x7F ) + ( ( pWord[0] & 3 )<<6 );
pWord += 2;
} else
return 0;
} else
*pOut++ = *pWord++;
}
*pOut = '\0';
return (int)( pOut-pStart );
}
static inline bool IsGermanAlphaUtf8 ( const BYTE * pWord )
{
// letters, windows-1252, utf-8
// A..Z, trivial
if ( pWord[0]>0x40 && pWord[0]<0x5b )
return true;
// a..z, also trivial
if ( pWord[0]>0x60 && pWord[0]<0x7b )
return true;
// mu, 0xb5
if ( pWord[0]==0xC2 && pWord[1]==0xB5 )
return true;
// some upper
if ( pWord[0]==0xC3 )
{
		if ( pWord[1]==0x9F ) // ss umlaut
return true;
switch ( pWord[1] | 0x20 )
{
case 0xA2: // umlauts
case 0xA4:
case 0xA7:
case 0xA8:
case 0xA9:
		case 0xAA:
case 0xB1:
case 0xB4:
case 0xB6:
		case 0xBB:
		case 0xBC:
return true;
}
}
return false;
}
static inline void Win1251ToLowercaseUtf8 ( BYTE * pOut, const BYTE * pWord )
{
while ( *pWord )
{
// a..p, E0..EF maps to D0 B0..D0 BF
// r..ya, F0..FF maps to D1 80..D1 8F
// yo maps to D1 91
if ( *pWord>=0xC0 )
{
BYTE iCh = ( *pWord | 0x20 ); // lowercase
BYTE iF = ( iCh>>4 ) & 1; // 0xE? or 0xF? value
*pOut++ = 0xD0 + iF;
*pOut++ = iCh - 0x30 - ( iF<<6 );
} else if ( *pWord==0xA8 || *pWord==0xB8 )
{
*pOut++ = 0xD1;
*pOut++ = 0x91;
} else
assert ( false );
pWord++;
}
*pOut++ = '\0';
}
static inline void Win1252ToLowercaseUtf8 ( BYTE * pOut, const BYTE * pWord )
{
while ( *pWord )
{
if ( !((*pWord)&0x80) )
*pOut++ = *pWord | 0x20;
else
{
*pOut++ = 0xC0 | ((*pWord)>>6);
*pOut++ = 0x80 | ((*pWord)&0x3F);
}
++pWord;
}
*pOut++ = '\0';
}
void sphAotLemmatizeRuUTF8 ( BYTE * pWord )
{
// i must be initialized
assert ( g_pLemmatizers[AOT_RU] );
// only if the word is russian
if ( !IsRussianAlphaUtf8(pWord) )
return;
// convert to Windows-1251
// failure means we should not lemmatize this
BYTE sBuf [ SPH_MAX_WORD_LEN+4 ];
auto iFormLen = Utf8ToWin1251 ( sBuf, pWord );
if ( !iFormLen )
return;
// lemmatize, convert back, done!
sphAotLemmatizeRu1251 ( sBuf, iFormLen );
Win1251ToLowercaseUtf8 ( pWord, sBuf );
}
void sphAotLemmatizeDeUTF8 ( BYTE * pWord )
{
// i must be initialized
assert ( g_pLemmatizers[AOT_DE] );
// only if the word is german
if ( !IsGermanAlphaUtf8(pWord) )
return;
// convert to Windows-1252
// failure means we should not lemmatize this
BYTE sBuf [ SPH_MAX_WORD_LEN+4 ];
auto iFormLen = Utf8ToWin1252 ( sBuf, pWord );
if ( !iFormLen )
return;
// lemmatize, convert back, done!
sphAotLemmatizeDe1252 ( sBuf, iFormLen );
Win1252ToLowercaseUtf8 ( pWord, sBuf );
}
void sphAotLemmatizeRu ( StrVec_t & dLemmas, const BYTE * pWord )
{
assert ( g_pLemmatizers[AOT_RU] );
if ( !IsRussianAlphaUtf8(pWord) )
return;
BYTE sForm [ SPH_MAX_WORD_LEN+4 ];
int iFormLen = 0;
iFormLen = Utf8ToWin1251 ( sForm, pWord );
if ( iFormLen<2 || IsRuFreq2(sForm) )
return;
if ( ( iFormLen<3 || IsRuFreq3(sForm) ) && !IsRuNeed2(sForm) )
return;
DWORD FindResults[12]; // at most ~6 results in practice; extra slots are headroom
bool bFound = g_pLemmatizers[AOT_RU]->LemmatizeWord ( (BYTE*)sForm, FindResults );
if ( FindResults[0]==AOT_NOFORM )
return;
for ( int i=0; FindResults[i]!=AOT_NOFORM; i++ )
{
const CFlexiaModel & M = g_pLemmatizers[AOT_RU]->m_FlexiaModels [ AOT_MODEL_NO ( FindResults[i] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( FindResults[i] ) ];
BYTE sRes [ 3*SPH_MAX_WORD_LEN+4 ];
CreateLemma<EMIT_UTF8RU> ( sRes, sForm, iFormLen, bFound, M, F );
dLemmas.Add ( (const char*)sRes );
}
// OPTIMIZE?
dLemmas.Uniq();
}
void sphAotLemmatizeDe ( StrVec_t & dLemmas, const BYTE * pWord )
{
assert ( g_pLemmatizers[AOT_DE] );
if ( !IsGermanAlphaUtf8(pWord) )
return;
BYTE sForm [ SPH_MAX_WORD_LEN+4 ];
int iFormLen = 0;
iFormLen = Utf8ToWin1252 ( sForm, pWord );
if ( iFormLen<=1 )
return;
if ( IsDeFreq2(sForm) || IsDeFreq3(sForm) )
return;
DWORD FindResults[12]; // at most ~6 results in practice; extra slots are headroom
bool bFound = g_pLemmatizers[AOT_DE]->LemmatizeWord ( (BYTE*)sForm, FindResults );
if ( FindResults[0]==AOT_NOFORM )
return;
for ( int i=0; FindResults[i]!=AOT_NOFORM; i++ )
{
const CFlexiaModel & M = g_pLemmatizers[AOT_DE]->m_FlexiaModels [ AOT_MODEL_NO ( FindResults[i] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( FindResults[i] ) ];
BYTE sRes [ 3*SPH_MAX_WORD_LEN+4 ];
CreateLemma<EMIT_UTF8> ( sRes, sForm, iFormLen, bFound, M, F );
dLemmas.Add ( (const char*)sRes );
}
// OPTIMIZE?
dLemmas.Uniq();
}
// generic lemmatize for other languages
void sphAotLemmatize ( StrVec_t & dLemmas, const BYTE * pWord, int iLang )
{
assert ( iLang!=AOT_RU ); // must be processed by the specialized function
assert ( g_pLemmatizers[iLang] );
if ( !IsAlphaAscii(*pWord) )
return;
BYTE sForm [ SPH_MAX_WORD_LEN+4 ];
int iFormLen = 0;
while ( *pWord )
sForm [ iFormLen++ ] = *pWord++;
sForm [ iFormLen ] = '\0';
if ( iFormLen<=1 )
return;
if ( iLang==AOT_EN && ( IsEnFreq2(sForm) || IsEnFreq3(sForm) ) )
return;
if ( iLang==AOT_DE && ( IsDeFreq2(sForm) || IsDeFreq3(sForm) ) )
return;
DWORD FindResults[12]; // at most ~6 results in practice; extra slots are headroom
bool bFound = g_pLemmatizers[iLang]->LemmatizeWord ( (BYTE*)sForm, FindResults );
if ( FindResults[0]==AOT_NOFORM )
return;
for ( int i=0; FindResults[i]!=AOT_NOFORM; i++ )
{
const CFlexiaModel & M = g_pLemmatizers[iLang]->m_FlexiaModels [ AOT_MODEL_NO ( FindResults[i] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( FindResults[i] ) ];
BYTE sRes [ 3*SPH_MAX_WORD_LEN+4 ];
CreateLemma<EMIT_1BYTE> ( sRes, sForm, iFormLen, bFound, M, F );
dLemmas.Add ( (const char*)sRes );
}
// OPTIMIZE?
dLemmas.Uniq();
}
const CSphNamedInt & sphAotDictinfo ( int iLang )
{
return g_tDictinfos[iLang];
}
//////////////////////////////////////////////////////////////////////////
#define MAX_EXTRA_TOKENS 12
/// token filter for AOT morphology indexing
/// AOT may return multiple (!) morphological hypotheses for a single token
/// we return such additional hypotheses as blended tokens
class CSphAotTokenizerTmpl : public CSphTokenFilter
{
protected:
using Base = CSphTokenFilter;
BYTE m_sForm[MAX_KEYWORD_BYTES];
int m_iFormLen = 0; ///< in bytes, but in windows-1251 that is characters, too
bool m_bFound = false; ///< found or predicted?
DWORD m_FindResults[MAX_EXTRA_TOKENS]; ///< at most ~6 results in practice
int m_iCurrent = -1; ///< index in m_FindResults that was just returned, -1 means no blending
BYTE m_sToken[MAX_KEYWORD_BYTES]; ///< to hold generated lemmas
BYTE m_sOrigToken[MAX_KEYWORD_BYTES]; ///< to hold original token
bool m_bIndexExact;
const CSphWordforms * m_pWordforms = nullptr;
public:
CSphAotTokenizerTmpl ( TokenizerRefPtr_c pTok, const DictRefPtr_c& pDict, bool bIndexExact, int DEBUGARG(iLang) )
: CSphTokenFilter ( std::move (pTok) )
{
assert ( g_pLemmatizers[iLang] );
m_FindResults[0] = AOT_NOFORM;
if ( pDict )
{
// tricky bit
// one does not simply take over the wordforms from the dict,
// as that would break saving of the (embedded) wordforms data;
// since this filter applies wordforms itself, they get disabled in the dict instead
m_pWordforms = pDict->GetWordforms();
pDict->DisableWordforms();
}
m_bIndexExact = bIndexExact;
}
bool TokenIsBlended() const noexcept final
{
return m_iCurrent>=0 || m_pTokenizer->TokenIsBlended();
}
uint64_t GetSettingsFNV () const noexcept final
{
uint64_t uHash = CSphTokenFilter::GetSettingsFNV();
uHash ^= (uint64_t)m_pWordforms;
DWORD uFlags = m_bIndexExact ? 1 : 0;
uHash = sphFNV64 ( &uFlags, sizeof(uFlags), uHash );
return uHash;
}
};
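// Illustrative m_FindResults layout (assumed values, not a quote from the
// sources): after lemmatizing a token with two hypotheses under
// index_exact_words=1 the array is scheduled as
//   { lemma#0, lemma#1, AOT_ORIGFORM, AOT_NOFORM, ... }
// GetToken() rewrites the current token to lemma#0 at once, then walks
// m_iCurrent over the rest as blended tokens; the AOT_ORIGFORM slot emits
// the verbatim token tagged SPH_TOKEN_MORPH_ORIGINAL.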
class CSphAotTokenizerRu : public CSphAotTokenizerTmpl
{
public:
CSphAotTokenizerRu ( TokenizerRefPtr_c pTok, const DictRefPtr_c& pDict, bool bIndexExact )
: CSphAotTokenizerTmpl ( std::move (pTok), pDict, bIndexExact, AOT_RU )
{}
TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final
{
// this token filter must NOT be created as escaped
// it must only be used during indexing time, NEVER in searching time
assert ( eMode==SPH_CLONE_INDEX );
auto * pClone = new CSphAotTokenizerRu ( m_pTokenizer->Clone ( eMode ), nullptr, m_bIndexExact );
if ( m_pWordforms )
pClone->m_pWordforms = m_pWordforms;
return TokenizerRefPtr_c { pClone };
}
BYTE * GetToken() final
{
m_eTokenMorph = SPH_TOKEN_MORPH_RAW;
// any pending lemmas left?
if ( m_iCurrent>=0 )
{
++m_iCurrent;
assert ( m_FindResults[m_iCurrent]!=AOT_NOFORM );
// return original token
if ( m_FindResults[m_iCurrent]==AOT_ORIGFORM )
{
assert ( m_FindResults[m_iCurrent+1]==AOT_NOFORM );
strncpy ( (char*)m_sToken, (char*)m_sOrigToken, sizeof(m_sToken)-1 );
m_sToken[sizeof(m_sToken)-1] = '\0'; // strncpy does not terminate on truncation
m_iCurrent = -1;
m_eTokenMorph = SPH_TOKEN_MORPH_ORIGINAL;
return m_sToken;
}
// generate that lemma
const CFlexiaModel & M = g_pLemmatizers[AOT_RU]->m_FlexiaModels [ AOT_MODEL_NO ( m_FindResults [ m_iCurrent ] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( m_FindResults [ m_iCurrent ] ) ];
CreateLemma<EMIT_UTF8RU> ( m_sToken, m_sForm, m_iFormLen, m_bFound, M, F );
// is this the last one? gotta tag it non-blended
if ( m_FindResults [ m_iCurrent+1 ]==AOT_NOFORM )
m_iCurrent = -1;
if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
m_pWordforms->ToNormalForm ( m_sToken, false, false );
m_eTokenMorph = SPH_TOKEN_MORPH_GUESS;
return m_sToken;
}
// ok, time to work on a next word
assert ( m_iCurrent<0 );
BYTE * pToken = Base::GetToken();
m_eTokenMorph = m_pTokenizer->GetTokenMorph();
if ( !pToken )
return nullptr;
// pass-through blended parts
if ( m_pTokenizer->TokenIsBlended() )
return pToken;
// pass-through matched wordforms
if ( m_pWordforms && m_pWordforms->ToNormalForm ( pToken, true, false ) )
return pToken;
// pass-through 1-char "words"
if ( pToken[1]=='\0' )
return pToken;
// pass-through non-Russian words
if ( !IsRussianAlphaUtf8 ( pToken ) )
return pToken;
// convert or copy regular tokens
m_iFormLen = Utf8ToWin1251 ( m_sForm, pToken );
// do nothing with one-char words
if ( m_iFormLen<=1 )
return pToken;
// handle a few most frequent 2-char, 3-char pass-through words
// OPTIMIZE? move up?
if ( IsRuFreq2 ( m_sForm ) || IsRuFreq3 ( m_sForm ) )
return pToken;
// lemmatize
m_bFound = g_pLemmatizers[AOT_RU]->LemmatizeWord ( m_sForm, m_FindResults );
if ( m_FindResults[0]==AOT_NOFORM )
{
assert ( m_iCurrent<0 );
return pToken;
}
// schedule original form for return, if needed
if ( m_bIndexExact )
{
int i = 1;
while ( m_FindResults[i]!=AOT_NOFORM )
i++;
m_FindResults[i] = AOT_ORIGFORM;
m_FindResults[i+1] = AOT_NOFORM;
strncpy ( (char*)m_sOrigToken, (char*)pToken, sizeof(m_sOrigToken) );
m_sOrigToken[sizeof(m_sOrigToken)-1] = '\0';
}
// in any event, prepare the first lemma for return
const CFlexiaModel & M = g_pLemmatizers[AOT_RU]->m_FlexiaModels [ AOT_MODEL_NO ( m_FindResults[0] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( m_FindResults[0] ) ];
CreateLemma<EMIT_UTF8RU> ( pToken, m_sForm, m_iFormLen, m_bFound, M, F );
// schedule lemmas 2+ for return
if ( m_FindResults[1]!=AOT_NOFORM )
m_iCurrent = 0;
// suddenly, post-morphology wordforms
if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
m_pWordforms->ToNormalForm ( pToken, false, false );
m_eTokenMorph = SPH_TOKEN_MORPH_GUESS;
return pToken;
}
};
class CSphAotTokenizer : public CSphAotTokenizerTmpl
{
AOT_LANGS m_iLang;
public:
CSphAotTokenizer ( TokenizerRefPtr_c pTok, const DictRefPtr_c& pDict, bool bIndexExact, int iLang )
: CSphAotTokenizerTmpl ( std::move (pTok), pDict, bIndexExact, iLang )
, m_iLang ( AOT_LANGS(iLang) )
{}
TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final
{
// this token filter must NOT be created as escaped
// it must only be used during indexing time, NEVER in searching time
assert ( eMode==SPH_CLONE_INDEX );
auto * pClone = new CSphAotTokenizer ( m_pTokenizer->Clone ( eMode ), nullptr, m_bIndexExact, m_iLang );
if ( m_pWordforms )
pClone->m_pWordforms = m_pWordforms;
return TokenizerRefPtr_c { pClone };
}
BYTE * GetToken() final
{
m_eTokenMorph = SPH_TOKEN_MORPH_RAW;
// any pending lemmas left?
if ( m_iCurrent>=0 )
{
++m_iCurrent;
assert ( m_FindResults[m_iCurrent]!=AOT_NOFORM );
// return original token
if ( m_FindResults[m_iCurrent]==AOT_ORIGFORM )
{
assert ( m_FindResults[m_iCurrent+1]==AOT_NOFORM );
strncpy ( (char*)m_sToken, (char*)m_sOrigToken, sizeof(m_sToken) );
m_sToken[sizeof ( m_sToken ) - 1] = '\0';
m_iCurrent = -1;
m_eTokenMorph = SPH_TOKEN_MORPH_ORIGINAL;
return m_sToken;
}
// generate that lemma
const CFlexiaModel & M = g_pLemmatizers[m_iLang]->m_FlexiaModels [ AOT_MODEL_NO ( m_FindResults [ m_iCurrent ] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( m_FindResults [ m_iCurrent ] ) ];
CreateLemma<EMIT_UTF8> ( m_sToken, m_sForm, m_iFormLen, m_bFound, M, F );
// is this the last one? gotta tag it non-blended
if ( m_FindResults [ m_iCurrent+1 ]==AOT_NOFORM )
m_iCurrent = -1;
if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
m_pWordforms->ToNormalForm ( m_sToken, false, false );
m_eTokenMorph = SPH_TOKEN_MORPH_GUESS;
return m_sToken;
}
// ok, time to work on a next word
assert ( m_iCurrent<0 );
BYTE * pToken = Base::GetToken();
m_eTokenMorph = m_pTokenizer->GetTokenMorph();
if ( !pToken )
return nullptr;
// pass-through blended parts
if ( m_pTokenizer->TokenIsBlended() )
return pToken;
// pass-through matched wordforms
if ( m_pWordforms && m_pWordforms->ToNormalForm ( pToken, true, false ) )
return pToken;
// pass-through 1-char "words"
if ( pToken[1]=='\0' )
return pToken;
// pass-through words with no western (german/english) letters
if ( m_iLang==AOT_DE )
{
if ( !IsGermanAlphaUtf8 ( pToken ) )
return pToken;
} else
{
if ( !IsGermanAlpha1252 ( pToken[0] ) )
return pToken;
}
// convert or copy regular tokens
if ( m_iLang==AOT_DE )
m_iFormLen = Utf8ToWin1252 ( m_sForm, pToken );
else
{
// manual strlen and memcpy; faster this way
BYTE * p = pToken;
m_iFormLen = 0;
while ( *p )
m_sForm [ m_iFormLen++ ] = *p++;
m_sForm [ m_iFormLen ] = '\0';
}
// do nothing with one-char words
if ( m_iFormLen<=1 )
return pToken;
// handle a few most frequent 2-char, 3-char pass-through words
// OPTIMIZE? move up?
if ( ( m_iLang==AOT_DE && ( IsDeFreq2 ( m_sForm ) || IsDeFreq3 ( m_sForm ) ) )
|| ( m_iLang==AOT_EN && ( IsEnFreq2 ( m_sForm ) || IsEnFreq3 ( m_sForm ) ) ) )
return pToken;
// lemmatize
m_bFound = g_pLemmatizers[m_iLang]->LemmatizeWord ( m_sForm, m_FindResults );
if ( m_FindResults[0]==AOT_NOFORM )
{
assert ( m_iCurrent<0 );
return pToken;
}
// schedule original form for return, if needed
if ( m_bIndexExact )
{
int i = 1;
while ( m_FindResults[i]!=AOT_NOFORM )
i++;
m_FindResults[i] = AOT_ORIGFORM;
m_FindResults[i+1] = AOT_NOFORM;
strncpy ( (char*)m_sOrigToken, (char*)pToken, sizeof(m_sOrigToken) );
m_sOrigToken[sizeof ( m_sOrigToken ) - 1] = '\0';
}
// in any event, prepare the first lemma for return
const CFlexiaModel & M = g_pLemmatizers[m_iLang]->m_FlexiaModels [ AOT_MODEL_NO ( m_FindResults[0] ) ];
const CMorphForm & F = M [ AOT_ITEM_NO ( m_FindResults[0] ) ];
CreateLemma<EMIT_UTF8> ( pToken, m_sForm, m_iFormLen, m_bFound, M, F );
// schedule lemmas 2+ for return
if ( m_FindResults[1]!=AOT_NOFORM )
m_iCurrent = 0;
// suddenly, post-morphology wordforms
if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
m_pWordforms->ToNormalForm ( pToken, false, false );
m_eTokenMorph = SPH_TOKEN_MORPH_GUESS;
return pToken;
}
};
class LemmatizerUk_c : public LemmatizerTrait_i
{
void * m_pUserdata = nullptr;
PluginTokenFilterRefPtr_c m_tPlugin;
public:
LemmatizerUk_c();
~LemmatizerUk_c() override;
BYTE * GetToken ( const BYTE * pWord, int & iExtra ) override;
BYTE * GetExtraToken() override;
};
class TokenizerUk_c : public CSphAotTokenizerTmpl
{
LemmatizerUk_c m_tLemmatizer;
public:
TokenizerUk_c ( TokenizerRefPtr_c pTok, const DictRefPtr_c& pDict, bool bIndexExact );
TokenizerRefPtr_c Clone ( ESphTokenizerClone eMode ) const noexcept final;
BYTE * GetToken() final;
};
void sphAotTransformFilter ( TokenizerRefPtr_c& pTokenizer, const DictRefPtr_c& pDict, bool bIndexExact, DWORD uLangMask )
{
assert ( uLangMask!=0 );
for ( int i=AOT_BEGIN; i<AOT_LENGTH; ++i )
{
if ( uLangMask & (1UL<<i) )
{
switch ( i )
{
case AOT_RU:
pTokenizer = new CSphAotTokenizerRu ( pTokenizer, pDict, bIndexExact );
break;
case AOT_UK:
pTokenizer = new TokenizerUk_c ( pTokenizer, pDict, bIndexExact );
break;
default:
pTokenizer = new CSphAotTokenizer ( pTokenizer, pDict, bIndexExact, i );
}
}
}
}
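// Hypothetical call site (the mask is an example, not a quote):
//   DWORD uMask = ( 1UL<<AOT_RU ) | ( 1UL<<AOT_EN );
//   sphAotTransformFilter ( pTokenizer, pDict, true, uMask );
// each set bit wraps the tokenizer in one more language filter, so the
// filters stack in AOT_BEGIN..AOT_LENGTH order.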
void sphAotShutdown ()
{
for ( auto& pLemmatizer : g_pLemmatizers )
SafeDelete ( pLemmatizer );
}
#if _WIN32
static CSphString g_sLemmatizerUkLib = "lemmatize_uk.dll";
#else
static CSphString g_sLemmatizerUkLib = "lemmatize_uk.so";
#endif
static CSphString g_sLemmatizerFnName = "luk";
static const int g_iLemmatizerUkStackSize = 1024 * 256; // 256k needed for python init
bool LoadLemmatizerUk ( CSphString & sError )
{
assert ( !g_pLemmatizers[AOT_UK] );
if ( !sphPluginExists ( PLUGIN_INDEX_TOKEN_FILTER, g_sLemmatizerFnName.cstr() ) )
{
bool bLoaded = false;
// the indexer does not have coroutines
if ( Threads::IsInsideCoroutine() )
{
bLoaded = Threads::Coro::ContinueBool ( g_iLemmatizerUkStackSize, [&sError]
{
return sphPluginCreate ( g_sLemmatizerUkLib.cstr(), PLUGIN_INDEX_TOKEN_FILTER, g_sLemmatizerFnName.cstr(), SPH_ATTR_NONE, true, sError );
});
} else
{
bLoaded = sphPluginCreate ( g_sLemmatizerUkLib.cstr(), PLUGIN_INDEX_TOKEN_FILTER, g_sLemmatizerFnName.cstr(), SPH_ATTR_NONE, true, sError );
}
if ( !bLoaded )
return false;
}
g_pLemmatizers[AOT_UK] = new CLemmatizer ( false );
g_pLemmatizers[AOT_UK]->m_iLang = AOT_UK;
g_tDictinfos[AOT_UK] = { g_sLemmatizerFnName, (int)sphCRC32 ( g_sLemmatizerFnName.cstr() ) };
return true;
}
LemmatizerUk_c::LemmatizerUk_c ()
{
m_tPlugin = PluginGet<PluginTokenFilter_c> ( PLUGIN_INDEX_TOKEN_FILTER, g_sLemmatizerFnName.cstr() );
if ( !m_tPlugin )
return;
CSphVector<const char*> dFields;
m_tPlugin->m_fnInit ( &m_pUserdata, dFields.GetLength(), dFields.Begin(), nullptr, nullptr );
}
LemmatizerUk_c::~LemmatizerUk_c()
{
if ( m_tPlugin && m_tPlugin->m_fnDeinit ) // the plugin may be missing, see the ctor
m_tPlugin->m_fnDeinit ( m_pUserdata );
}
BYTE * LemmatizerUk_c::GetToken ( const BYTE * pWord, int & iExtra )
{
if ( !m_tPlugin )
return (BYTE *)pWord;
int iPosDelta = 0;
return (BYTE*)m_tPlugin->m_fnPushToken ( m_pUserdata, (char*)pWord, &iExtra, &iPosDelta );
}
BYTE * LemmatizerUk_c::GetExtraToken()
{
if ( !m_tPlugin )
return nullptr;
int iPosDelta = 0;
return (BYTE*)m_tPlugin->m_fnGetExtraToken ( m_pUserdata, &iPosDelta );
}
static bool SkipNonUkToken ( const BYTE * pWord )
{
// pass-through 1-char "words"
if ( pWord[1]=='\0' )
return true;
int iCodepoints = 0;
int iCode = 0;
while ( ( iCode = sphUTF8Decode ( pWord ) )>0 )
{
iCodepoints++;
if ( iCode>=0x400 && iCode<=0x4ff )
continue;
// allow non_cjk uk mapping too
if ( iCode==0x69 || iCode==0x73 || iCode==0xE6 )
continue;
// pass-through words with non ukrainian chars
return true;
}
// pass-through 1-char "words"
return ( iCodepoints<2 );
}
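// Worked example (illustrative): "київ" decodes to U+043A U+0438 U+0457
// U+0432, all inside U+0400..U+04FF, so it is NOT skipped; "kyiv" starts
// with 'k' (0x6B), which is outside the allowed set, so the token is
// passed through untouched. the three whitelisted Latin codepoints
// (0x69 'i', 0x73 's', 0xE6 'æ') cover non_cjk charset folding.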
void sphAotLemmatizeUk ( BYTE * pWord, LemmatizerTrait_i * pLemmatizer )
{
if ( !pLemmatizer )
return;
if ( SkipNonUkToken ( pWord ) )
return;
int iExtraCount = 0;
const BYTE * pDst = pLemmatizer->GetToken ( pWord, iExtraCount );
strcpy ( (char*)pWord, (char*)pDst ); // NOLINT
}
void sphAotLemmatizeUk ( StrVec_t & dLemmas, const BYTE * pWord, LemmatizerTrait_i * pLemmatizer )
{
if ( !pLemmatizer )
return;
if ( SkipNonUkToken ( pWord ) )
return;
int iExtraCount = 0;
dLemmas.Add ( (const char *)pLemmatizer->GetToken ( pWord, iExtraCount ) );
iExtraCount = Min ( iExtraCount, MAX_EXTRA_TOKENS );
for ( int i=0; i<iExtraCount; i++ )
dLemmas.Add ( (const char *)pLemmatizer->GetExtraToken () );
dLemmas.Uniq();
}
std::unique_ptr<LemmatizerTrait_i> CreateLemmatizer ( int iLang )
{
if ( iLang!=AOT_UK )
return nullptr;
return std::make_unique<LemmatizerUk_c>();
}
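// Hypothetical usage sketch: only Ukrainian gets a per-call lemmatizer
// object (it proxies into the lemmatize_uk plugin); for AOT_UK the result
// is always non-null, and a missing plugin is handled inside the object.
//   auto pLem = CreateLemmatizer ( AOT_UK );
//   sphAotLemmatizeUk ( dLemmas, pWord, pLem.get() );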
TokenizerUk_c::TokenizerUk_c ( TokenizerRefPtr_c pTok, const DictRefPtr_c& pDict, bool bIndexExact )
: CSphAotTokenizerTmpl ( std::move (pTok), pDict, bIndexExact, AOT_UK )
{
}
TokenizerRefPtr_c TokenizerUk_c::Clone ( ESphTokenizerClone eMode ) const noexcept
{
// this token filter must NOT be created as escaped
// it must only be used during indexing time, NEVER in searching time
assert ( eMode==SPH_CLONE_INDEX );
auto * pClone = new TokenizerUk_c ( m_pTokenizer->Clone ( eMode ), nullptr, m_bIndexExact );
if ( m_pWordforms )
pClone->m_pWordforms = m_pWordforms;
return TokenizerRefPtr_c { pClone };
}
BYTE * TokenizerUk_c::GetToken()
{
m_eTokenMorph = SPH_TOKEN_MORPH_RAW;
// any pending lemmas left?
if ( m_iCurrent>=0 )
{
++m_iCurrent;
assert ( m_FindResults[m_iCurrent]!=AOT_NOFORM );
// return original token
if ( m_FindResults[m_iCurrent]==AOT_ORIGFORM )
{
assert ( m_FindResults[m_iCurrent+1]==AOT_NOFORM );
strncpy ( (char*)m_sToken, (char*)m_sOrigToken, sizeof(m_sToken) );
m_sToken[sizeof ( m_sToken ) - 1] = '\0';
m_iCurrent = -1;
m_eTokenMorph = SPH_TOKEN_MORPH_ORIGINAL;
return m_sToken;
}
// generate that extra lemma
BYTE * pToken = m_tLemmatizer.GetExtraToken();
// is this the last one? gotta tag it non-blended
if ( m_FindResults [ m_iCurrent+1 ]==AOT_NOFORM )
m_iCurrent = -1;
if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
m_pWordforms->ToNormalForm ( pToken, false, false );
m_eTokenMorph = SPH_TOKEN_MORPH_GUESS;
return pToken;
}
// ok, time to work on a next word
assert ( m_iCurrent<0 );
BYTE * pToken = Base::GetToken();
m_eTokenMorph = m_pTokenizer->GetTokenMorph();
if ( !pToken )
return nullptr;
// pass-through blended parts
if ( m_pTokenizer->TokenIsBlended() )
return pToken;
// pass-through matched wordforms
if ( m_pWordforms && m_pWordforms->ToNormalForm ( pToken, true, false ) )
return pToken;
if ( SkipNonUkToken ( pToken ) )
return pToken;
// lemmatize
int iExtra = 0;
pToken = m_tLemmatizer.GetToken ( pToken, iExtra );
// FIXME!!! implement token pass-through
m_FindResults[0] = 0;
int iLastEmpty = 1;
int iTokensEnd = Min ( iLastEmpty+iExtra, MAX_EXTRA_TOKENS-1 );
for ( ; iLastEmpty<iTokensEnd; iLastEmpty++ )
m_FindResults[iLastEmpty] = 0;
m_FindResults[iLastEmpty] = AOT_NOFORM;
// schedule original form for return, if needed, will be last token
if ( m_bIndexExact )
{
iLastEmpty = Min ( iLastEmpty, MAX_EXTRA_TOKENS-2 );
m_FindResults[iLastEmpty] = AOT_ORIGFORM;
m_FindResults[iLastEmpty+1] = AOT_NOFORM;
strncpy ( (char*)m_sOrigToken, (char*)pToken, sizeof(m_sOrigToken) );
m_sOrigToken[sizeof ( m_sOrigToken ) - 1] = '\0';
}
// schedule lemmas 2+ for return
if ( m_FindResults[1]!=AOT_NOFORM )
m_iCurrent = 0;
// suddenly, post-morphology wordforms
if ( m_pWordforms && m_pWordforms->m_bHavePostMorphNF )
m_pWordforms->ToNormalForm ( pToken, false, false );
m_eTokenMorph = SPH_TOKEN_MORPH_GUESS;
return pToken;
}
namespace {
XQNode_t* CloneKeyword ( const XQNode_t* pNode )
{
assert ( pNode );
auto* pRes = new XQNode_t ( pNode->m_dSpec );
pRes->m_dWords = pNode->m_dWords;
return pRes;
}
/// create a node from a set of lemmas
/// WARNING, tKeyword might or might not be pointing to pNode->m_dWords[0]
/// Called from the daemon side (searchd) in time of query
void TransformAotFilterKeyword ( XQNode_t * pNode, LemmatizerTrait_i * pLemmatizer, const XQKeyword_t & tKeyword, const CSphWordforms * pWordforms, const CSphIndexSettings & tSettings )
{
assert ( pNode->m_dWords.GetLength()<=1 );
assert ( pNode->m_dChildren.GetLength()==0 );
XQNode_t * pExact = nullptr;
if ( pWordforms )
{
// do a copy, because patching in place is not an option
// short => longlonglong wordform mapping would crash
// OPTIMIZE? forms that are not found will (?) get looked up again in the dict
char sBuf [ MAX_KEYWORD_BYTES ];
strncpy ( sBuf, tKeyword.m_sWord.cstr(), sizeof(sBuf)-1 );
sBuf[sizeof(sBuf)-1] = '\0'; // ensure termination on truncation
if ( pWordforms->ToNormalForm ( (BYTE*)sBuf, true, false ) )
{
if ( !pNode->m_dWords.GetLength() )
pNode->m_dWords.Add ( tKeyword );
pNode->m_dWords[0].m_sWord = sBuf;
pNode->m_dWords[0].m_bMorphed = true;
return;
}
}
StrVec_t dLemmas;
DWORD uLangMask = tSettings.m_uAotFilterMask;
for ( int i=AOT_BEGIN; i<AOT_LENGTH; ++i )
{
if ( uLangMask & (1UL<<i) )
{
if ( i==AOT_RU )
sphAotLemmatizeRu ( dLemmas, (const BYTE*)tKeyword.m_sWord.cstr() );
else if ( i==AOT_DE )
sphAotLemmatizeDe ( dLemmas, (const BYTE*)tKeyword.m_sWord.cstr() );
else if ( i==AOT_UK )
sphAotLemmatizeUk ( dLemmas, (const BYTE*)tKeyword.m_sWord.cstr(), pLemmatizer );
else
sphAotLemmatize ( dLemmas, (const BYTE*)tKeyword.m_sWord.cstr(), i );
}
}
// post-morph wordforms
if ( pWordforms && pWordforms->m_bHavePostMorphNF )
{
char sBuf [ MAX_KEYWORD_BYTES ];
ARRAY_FOREACH ( i, dLemmas )
{
strncpy ( sBuf, dLemmas[i].cstr(), sizeof(sBuf)-1 );
sBuf[sizeof(sBuf)-1] = '\0'; // ensure termination on truncation
if ( pWordforms->ToNormalForm ( (BYTE*)sBuf, false, false ) )
dLemmas[i] = sBuf;
}
}
if ( dLemmas.GetLength() && tSettings.m_bIndexExactWords )
{
pExact = CloneKeyword ( pNode );
if ( !pExact->m_dWords.GetLength() )
pExact->m_dWords.Add ( tKeyword );
pExact->m_dWords[0].m_sWord.SetSprintf ( "=%s", tKeyword.m_sWord.cstr() );
pExact->m_pParent = pNode;
}
if ( !pExact && dLemmas.GetLength()<=1 )
{
// zero or one lemmas, update node in-place
if ( !pNode->m_dWords.GetLength() )
pNode->m_dWords.Add ( tKeyword );
if ( dLemmas.GetLength() )
{
pNode->m_dWords[0].m_sWord = dLemmas[0];
pNode->m_dWords[0].m_bMorphed = true;
}
} else
{
// multiple lemmas, create an OR node
pNode->SetOp ( SPH_QUERY_OR );
ARRAY_FOREACH ( i, dLemmas )
{
pNode->m_dChildren.Add ( new XQNode_t ( pNode->m_dSpec ) );
pNode->m_dChildren.Last()->m_pParent = pNode;
XQKeyword_t & tLemma = pNode->m_dChildren.Last()->m_dWords.Add();
tLemma.m_sWord = dLemmas[i];
tLemma.m_iAtomPos = tKeyword.m_iAtomPos;
tLemma.m_bFieldStart = tKeyword.m_bFieldStart;
tLemma.m_bFieldEnd = tKeyword.m_bFieldEnd;
tLemma.m_bMorphed = true;
}
pNode->m_dWords.Reset();
if ( pExact )
pNode->m_dChildren.Add ( pExact );
}
}
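// Illustrative shape of the transform (hypothetical keyword W with two
// lemmas L1, L2 and index_exact_words=1):
//   before:  KEYWORD(W)
//   after:   OR( KEYWORD(L1), KEYWORD(L2), KEYWORD(=W) )
// with zero or one lemma and no exact form, the node is patched in place
// instead of growing children.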
}// namespace
/// AOT morph guesses transform
/// replaces tokens with their respective morph guesses subtrees
/// used in lemmatize_ru_all morphology processing mode that can generate multiple guesses
/// in other modes, there is always exactly one morph guess, and the dictionary handles it
/// Called from the daemon side (searchd)
void TransformAotFilter ( XQNode_t * pNode, LemmatizerTrait_i * pLemmatizer, const CSphWordforms * pWordforms, const CSphIndexSettings & tSettings )
{
if ( !pNode )
return;
// case one, regular operator (and empty nodes)
for ( XQNode_t* pChild : pNode->m_dChildren )
TransformAotFilter ( pChild, pLemmatizer, pWordforms, tSettings );
if ( pNode->m_dChildren.GetLength() || pNode->m_dWords.GetLength()==0 )
return;
// case two, operator on a bag of words
// FIXME? check phrase vs expand_keywords vs lemmatize_ru_all?
if ( pNode->m_dWords.GetLength()
&& ( pNode->GetOp()==SPH_QUERY_PHRASE || pNode->GetOp()==SPH_QUERY_PROXIMITY || pNode->GetOp()==SPH_QUERY_QUORUM ) )
{
assert ( pNode->m_dWords.GetLength() );
for ( XQKeyword_t& tWord : pNode->m_dWords )
{
auto * pNew = new XQNode_t ( pNode->m_dSpec );
pNew->m_pParent = pNode;
pNew->m_iAtomPos = tWord.m_iAtomPos;
pNode->m_dChildren.Add ( pNew );
TransformAotFilterKeyword ( pNew, pLemmatizer, tWord, pWordforms, tSettings );
}
pNode->m_dWords.Reset();
pNode->m_bVirtuallyPlain = true;
return;
}
// case three, plain old single keyword
assert ( pNode->m_dWords.GetLength()==1 );
TransformAotFilterKeyword ( pNode, pLemmatizer, pNode->m_dWords[0], pWordforms, tSettings );
}
void TransformAotFilter ( XQNode_t * pNode, const CSphWordforms * pWordforms, const CSphIndexSettings & tSettings )
{
if ( !tSettings.m_uAotFilterMask )
return;
int iAotLang = ( tSettings.m_uAotFilterMask & ( 1<<AOT_UK ) ) ? AOT_UK : AOT_LENGTH;
std::unique_ptr<LemmatizerTrait_i> tLemmatizer = CreateLemmatizer ( iAotLang );
TransformAotFilter ( pNode, tLemmatizer.get(), pWordforms, tSettings );
}
| 60,501 | C++ | .cpp | 1,862 | 29.835124 | 185 | 0.671048 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,847 | attrindex_builder.cpp | manticoresoftware_manticoresearch/src/attrindex_builder.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "attrindex_builder.h"
#include <cfloat>
#include <climits>
#include "conversion.h"
#include "sphinxint.h"
// dirty hack for build systems that do not define LLONG_MAX
#ifndef LLONG_MAX
#define LLONG_MAX (((unsigned long long)(-1))>>1)
#endif
#ifndef LLONG_MIN
#define LLONG_MIN (-LLONG_MAX-1)
#endif
#ifndef ULLONG_MAX
#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
#endif
AttrIndexBuilder_c::AttrIndexBuilder_c ( const CSphSchema & tSchema )
{
Init ( tSchema );
}
void AttrIndexBuilder_c::Init ( const CSphSchema& tSchema )
{
m_uStride = tSchema.GetRowSize();
for ( int i = 0; i < tSchema.GetAttrsCount(); ++i )
{
const CSphColumnInfo& tCol = tSchema.GetAttr ( i );
if ( tCol.IsColumnar() )
continue;
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
case SPH_ATTR_BIGINT:
case SPH_ATTR_TOKENCOUNT:
m_dIntAttrs.Add ( tCol.m_tLocator );
break;
case SPH_ATTR_FLOAT:
m_dFloatAttrs.Add ( tCol.m_tLocator );
break;
default:
break;
}
}
m_dIntMin.Resize ( m_dIntAttrs.GetLength() );
m_dIntMax.Resize ( m_dIntAttrs.GetLength() );
m_dFloatMin.Resize ( m_dFloatAttrs.GetLength() );
m_dFloatMax.Resize ( m_dFloatAttrs.GetLength() );
m_dIntIndexMin.Resize ( m_dIntAttrs.GetLength() );
m_dIntIndexMax.Resize ( m_dIntAttrs.GetLength() );
m_dFloatIndexMin.Resize ( m_dFloatAttrs.GetLength() );
m_dFloatIndexMax.Resize ( m_dFloatAttrs.GetLength() );
m_dIntIndexMin.Fill ( LLONG_MAX );
m_dIntIndexMax.Fill ( LLONG_MIN );
m_dFloatIndexMin.Fill ( FLT_MAX );
m_dFloatIndexMax.Fill ( -FLT_MAX ); // -FLT_MAX, not FLT_MIN: FLT_MIN is the smallest positive float
ResetLocal();
}
void AttrIndexBuilder_c::Collect ( const CSphRowitem * pRow )
{
// check if it is time to flush already collected values
if ( m_nLocalCollected>=DOCINFO_INDEX_FREQ )
FlushComputed();
m_nLocalCollected++;
// ints
ARRAY_FOREACH ( i, m_dIntAttrs )
{
SphAttr_t tVal = sphGetRowAttr ( pRow, m_dIntAttrs[i] );
m_dIntMin[i] = Min ( m_dIntMin[i], tVal );
m_dIntMax[i] = Max ( m_dIntMax[i], tVal );
}
// floats
ARRAY_FOREACH ( i, m_dFloatAttrs )
{
float fVal = sphDW2F ( (DWORD)sphGetRowAttr ( pRow, m_dFloatAttrs[i] ) );
m_dFloatMin[i] = Min ( m_dFloatMin[i], fVal );
m_dFloatMax[i] = Max ( m_dFloatMax[i], fVal );
}
}
void AttrIndexBuilder_c::FinishCollect()
{
if ( m_nLocalCollected )
FlushComputed();
CSphRowitem * pMinAttrs = m_dMinMaxRows.AddN ( m_uStride*2 );
CSphRowitem * pMaxAttrs = pMinAttrs+m_uStride;
memset ( pMinAttrs, 0, sizeof(CSphRowitem) * m_uStride * 2 );
ARRAY_FOREACH ( i, m_dIntAttrs )
{
sphSetRowAttr ( pMinAttrs, m_dIntAttrs[i], m_dIntIndexMin[i] );
sphSetRowAttr ( pMaxAttrs, m_dIntAttrs[i], m_dIntIndexMax[i] );
}
ARRAY_FOREACH ( i, m_dFloatAttrs )
{
sphSetRowAttr ( pMinAttrs, m_dFloatAttrs[i], sphF2DW ( m_dFloatIndexMin[i] ) );
sphSetRowAttr ( pMaxAttrs, m_dFloatAttrs[i], sphF2DW ( m_dFloatIndexMax[i] ) );
}
}
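// Hypothetical driver loop (names other than the builder's are examples):
//   AttrIndexBuilder_c tBuilder ( tSchema );
//   for ( const CSphRowitem * pRow : dRows )
//       tBuilder.Collect ( pRow );      // auto-flushes every DOCINFO_INDEX_FREQ rows
//   tBuilder.FinishCollect();           // appends the index-wide min/max pair
//   const auto & dMinMax = tBuilder.GetCollected(); // 2*stride rowitems per block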
const CSphTightVector<CSphRowitem> & AttrIndexBuilder_c::GetCollected() const
{
return m_dMinMaxRows;
}
void AttrIndexBuilder_c::ResetLocal()
{
for ( auto & i : m_dIntMin )
i = LLONG_MAX;
for ( auto & i : m_dIntMax )
i = LLONG_MIN;
for ( auto & i : m_dFloatMin )
i = FLT_MAX;
for ( auto & i : m_dFloatMax )
i = -FLT_MAX; // -FLT_MAX, not FLT_MIN, so that negative values update the max
m_nLocalCollected = 0;
}
void AttrIndexBuilder_c::FlushComputed ()
{
CSphRowitem * pMinAttrs = m_dMinMaxRows.AddN ( m_uStride*2 );
CSphRowitem * pMaxAttrs = pMinAttrs+m_uStride;
memset ( pMinAttrs, 0, sizeof ( CSphRowitem ) * m_uStride * 2 );
ARRAY_FOREACH ( i, m_dIntAttrs )
{
m_dIntIndexMin[i] = Min ( m_dIntIndexMin[i], m_dIntMin[i] );
m_dIntIndexMax[i] = Max ( m_dIntIndexMax[i], m_dIntMax[i] );
sphSetRowAttr ( pMinAttrs, m_dIntAttrs[i], m_dIntMin[i] );
sphSetRowAttr ( pMaxAttrs, m_dIntAttrs[i], m_dIntMax[i] );
}
ARRAY_FOREACH ( i, m_dFloatAttrs )
{
m_dFloatIndexMin[i] = Min ( m_dFloatIndexMin[i], m_dFloatMin[i] );
m_dFloatIndexMax[i] = Max ( m_dFloatIndexMax[i], m_dFloatMax[i] );
sphSetRowAttr ( pMinAttrs, m_dFloatAttrs[i], sphF2DW ( m_dFloatMin[i] ) );
sphSetRowAttr ( pMaxAttrs, m_dFloatAttrs[i], sphF2DW ( m_dFloatMax[i] ) );
}
ResetLocal();
}
| 4,615 | C++ | .cpp | 144 | 29.770833 | 81 | 0.70713 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,848 | netreceive_api.cpp | manticoresoftware_manticoresearch/src/netreceive_api.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "netreceive_api.h"
extern int g_iClientTimeoutS; // from searchd.cpp
extern volatile bool g_bMaintenance;
static auto & g_bGotSighup = sphGetGotSighup (); // we just received SIGHUP; need to log
// mostly repeats HandleClientSphinx
void ApiServe ( std::unique_ptr<AsyncNetBuffer_c> pBuf )
{
auto& tSess = session::Info();
// non-vip connections in maintenance mode should already be rejected on accept
assert ( !g_bMaintenance || tSess.GetVip() );
auto eExpectedProto = tSess.GetProto();
bool bClientWaitsHandshake = eExpectedProto==Proto_e::SPHINXSE;
tSess.SetProto ( Proto_e::SPHINX );
int iCID = tSess.GetConnID();
const char * sClientIP = tSess.szClientName();
// needed to check permission to turn maintenance mode on/off
auto& tOut = *(GenericOutputBuffer_c *) pBuf.get();
auto& tIn = *(AsyncNetInputBuffer_c *) pBuf.get();
// send handshake
tSess.SetTaskState ( TaskState_e::HANDSHAKE );
tOut.SendDword ( SPHINX_SEARCHD_PROTO ); // that is handshake
// SphinxSE is a legacy client: it waits for our handshake before sending anything, and answers only once it has arrived.
if ( bClientWaitsHandshake && !tOut.Flush () )
{
sphLogDebugv ( "conn %s(%d): legacy client timeout when sending handshake", sClientIP, iCID );
return;
}
if ( !tIn.ReadFrom ( 4, true ))
{
sphWarning ( "failed to receive API handshake (client=%s(%d), exp=%d, error='%s')",
sClientIP, iCID, 4, sphSockError ());
return;
}
auto uHandshake = tIn.GetDword();
sphLogDebugv ( "conn %s(%d): got handshake, major v.%d", sClientIP, iCID, uHandshake );
if ( uHandshake!=SPHINX_CLIENT_VERSION && uHandshake!=0x01000000UL )
{
sphLogDebugv ( "conn %s(%d): got handshake, major v.%d", sClientIP, iCID, uHandshake );
return;
}
// a legacy client sends exactly 4 bytes of handshake, so we also have to flush our handshake before continuing.
if ( !bClientWaitsHandshake && !tIn.HasBytes () && !tOut.Flush ())
{
sphLogDebugv ( "conn %s(%d): legacy client timeout when exchanging handshake", sClientIP, iCID );
return;
}
if ( eExpectedProto==Proto_e::HTTPS )
{
SendErrorReply ( tOut, "Binary API request was sent to HTTPS port" );
tOut.Flush (); // no need to check return code since we anyway break
return;
}
int iPconnIdleS = 0;
// main loop for one or more commands (if persist)
do
{
if ( !tIn.HasBytes ())
tIn.DiscardProcessed ();
// default client_timeout vs 5 seconds,
// to be on par with the HTTP handler / HttpServe code.
// a persistent connection should wait client_timeout, reactivating the sock timeout after each received packet
auto iTimeoutS = ( tSess.GetPersistent() ? g_iClientTimeoutS : g_iReadTimeoutS );
sphLogDebugv ( "conn %s(%d): loop start with timeout %d", sClientIP, iCID, iTimeoutS );
tIn.SetTimeoutUS ( S2US * iTimeoutS );
tSess.SetKilled ( false );
// in "persistent connection" mode, we want interruptible waits
// so that the worker child could be forcibly restarted
//
// currently, the only signal allowed to interrupt this read is SIGTERM
// letting SIGHUP interrupt causes trouble under query/rotation pressure
// see sphSockRead() and ReadFrom() for details
bool bCommand = tIn.ReadFrom ( 8, tSess.GetPersistent() );
if ( !bCommand )
{
// on SIGTERM, bail unconditionally and immediately, at all times
if ( sphInterrupted () )
{
sphLogDebugv ( "conn %s(%d): bailing on SIGTERM", sClientIP, iCID );
break;
}
// on SIGHUP vs pconn, bail if a pconn was idle for 1 sec
if ( tSess.GetPersistent() && sphSockPeekErrno ()==ETIMEDOUT )
{
sphLogDebugv ( "conn %s(%d): persist + timeout condition", sClientIP, iCID );
if ( g_bGotSighup )
{
sphLogDebugv ( "conn %s(%d): bailing idle pconn on SIGHUP", sClientIP, iCID );
break;
}
// on pconn that was idle for 300 sec (client_timeout), bail
iPconnIdleS += iTimeoutS;
bool bClientTimedout = ( iPconnIdleS>=g_iClientTimeoutS );
if ( bClientTimedout )
sphLogDebugv ( "conn %s(%d): bailing idle pconn on client_timeout", sClientIP, iCID );
else
{
pBuf->ResetError();
sphLogDebugv ( "conn %s(%d): timeout, not reached, continue", sClientIP, iCID );
continue;
}
}
break; // some error, no need to continue
}
iPconnIdleS = 0;
auto eCommand = (SearchdCommand_e) tIn.GetWord ();
auto uVer = tIn.GetWord ();
auto iReplySize = tIn.GetInt ();
sphLogDebugv ( "read command %d, version %d, reply size %d", eCommand, uVer, iReplySize );
bool bCheckLen = ( eCommand!= SEARCHD_COMMAND_CLUSTER );
bool bBadCommand = ( eCommand>=SEARCHD_COMMAND_WRONG );
// should not fail replication commands from other nodes as max_packet_size could be different between nodes
bool bBadLength = ( iReplySize<0 || ( bCheckLen && iReplySize>tIn.GetMaxPacketSize() ) );
if ( bBadCommand || bBadLength )
{
// unknown command, default response header
if ( bBadLength )
sphWarning ( "ill-formed client request (length=%d out of bounds)", iReplySize );
// if command is insane, low level comm is broken, so we bail out
if ( bBadCommand )
sphWarning ( "ill-formed client request (command=%d, SEARCHD_COMMAND_TOTAL=%d)", eCommand,
SEARCHD_COMMAND_TOTAL );
SendErrorReply ( tOut, "invalid %s (code=%d, len=%d)", ( bBadLength ? "length" : "command" ), eCommand, iReplySize );
tOut.Flush(); // no need to check return code since we anyway break
break;
}
if ( !bCheckLen )
tIn.SetMaxPacketSize ( tIn.GetBufferPos() + iReplySize );
if ( iReplySize && !tIn.ReadFrom ( iReplySize, true ))
{
sphWarning ( "failed to receive API body (client=%s(%d), exp=%d(%d), error='%s')",
sClientIP, iCID, iReplySize, tIn.HasBytes(), sphSockError ());
break;
}
auto& tCrashQuery = GlobalCrashQueryGetRef();
tCrashQuery.m_dQuery = { tIn.GetBufferPtr (), iReplySize };
tCrashQuery.m_eType = QUERY_API;
tCrashQuery.m_uCMD = eCommand;
tCrashQuery.m_uVer = uVer;
// special process for 'ping' as immediate answer (before 'maxed out' check)
if ( eCommand == SEARCHD_COMMAND_PING )
{
HandleCommandPing ( tOut, uVer, tIn );
tOut.Flush(); // no need to check return code since we anyway break
break;
}
if ( IsMaxedOut() )
{
sphWarning ( "%s", g_sMaxedOutMessage.first );
{
auto tHdr = APIHeader ( tOut, SEARCHD_RETRY );
tOut.SendString ( g_sMaxedOutMessage );
}
tOut.Flush(); // no need to check return code since we anyway break
gStats().m_iMaxedOut.fetch_add ( 1, std::memory_order_relaxed );
break;
}
// persist is special command - no version, no answer expected, modifies persistent state - so process it here
if ( eCommand == SEARCHD_COMMAND_PERSIST )
{
auto bPersist = ( tIn.GetInt()!=0 );
sphLogDebugv ( "conn %s(%d): pconn is now %s", tSess.szClientName (), tSess.GetConnID(), bPersist ? "on" : "off" );
tSess.SetPersistent ( bPersist );
}
ExecuteApiCommand ( eCommand, uVer, iReplySize, tIn, tOut );
if ( !tOut.Flush () )
break;
pBuf->SyncErrorState();
if ( tIn.GetError() )
sphWarning ( "%s", tIn.GetErrorMessage().cstr() );
pBuf->ResetError();
} while ( tSess.GetPersistent());
sphLogDebugv ( "conn %s(%d): exiting", sClientIP, iCID );
}
// Start Sphinx API command/request header
APIBlob_c APIHeader ( ISphOutputBuffer & dBuff, WORD uCommand, WORD uVer )
{
dBuff.SendWord ( uCommand );
dBuff.SendWord ( uVer );
return APIBlob_c ( dBuff );
}
// Sphinx API answer (same as APIHeader, but with the 2nd and 3rd params swapped). FIXME! Unify.
APIBlob_c APIAnswer ( ISphOutputBuffer & dBuff, WORD uVer, WORD uStatus )
{
return APIHeader ( dBuff, uStatus, uVer );
}
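// Wire framing recap (illustrative; APIBlob_c is assumed to write a length
// placeholder and patch it on scope exit): every reply is
//   WORD status, WORD version, DWORD body length, body bytes.
// e.g. a minimal reply sketch:
//   { auto tBlob = APIAnswer ( tOut, uVer, SEARCHD_OK ); tOut.SendString ( "ok" ); }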
| 8,116 | C++ | .cpp | 196 | 38.229592 | 120 | 0.699937 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,849 | spelldump.cpp | manticoresoftware_manticoresearch/src/spelldump.cpp |
//
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include "tokenizer/charset_definition_parser.h"
#include "tokenizer/lowercaser.h"
#include <locale.h>
const int MAX_STR_LENGTH = 512;
//////////////////////////////////////////////////////////////////////////
static BYTE GetWordchar ( const char * & szSet )
{
if ( *szSet=='\\' )
{
if ( !szSet[1] || !szSet[2] || !szSet[3] )
return 0;
char szBuf[3];
memcpy ( szBuf, szSet+2, 2 );
szBuf[2] = 0;
char * szStop = nullptr;
int iRes = strtol ( szBuf, &szStop, 16 );
if ( szStop!=szBuf+2 || iRes<0 || iRes>255 )
return 0;
szSet += 4;
return (BYTE) iRes;
}
return *szSet++;
}
static bool IsInSet ( BYTE uLetter, const char * szSet )
{
if ( !szSet )
return false;
bool bInvert = ( *szSet=='^' );
if ( bInvert )
++szSet;
const char * szSep = strchr ( szSet, '-' );
bool bRange = ( szSep!=nullptr );
if ( bRange )
{
BYTE uRange1 = GetWordchar ( szSet );
szSep++;
BYTE uRange2 = GetWordchar ( szSep );
if ( uLetter>=Min ( uRange1, uRange2 ) && uLetter<=Max ( uRange1, uRange2 ) )
return !bInvert;
} else
{
BYTE uChar = 0;
while ( ( uChar = GetWordchar ( szSet ) )!=0 )
if ( uChar==uLetter )
break;
bool bEnd = !uChar;
if ( bInvert && bEnd )
return true;
if ( !bInvert && !bEnd )
return true;
}
return false;
}
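// Worked examples (illustrative): IsInSet('b',"a-c") takes the range branch
// and returns true; IsInSet('b',"^a-c") inverts that to false; IsInSet('x',
// "xyz") scans the enumeration branch until 'x' matches. escapes of the
// "\x41" form (handled in GetWordchar) name raw byte values in hex.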
static bool GetSetMinMax ( const char * szSet, BYTE & uMin, BYTE & uMax )
{
if ( !szSet || !*szSet )
return false;
uMin = GetWordchar ( szSet );
uMax = uMin;
BYTE uChar;
while ( ( uChar = GetWordchar ( szSet ) )!=0 )
if ( uChar!='-' )
{
uMin = Min ( uMin, uChar );
uMax = Max ( uMax, uChar );
}
if ( !uMin || !uMax )
return false;
return true;
}
//////////////////////////////////////////////////////////////////////////
class CISpellDict
{
public:
struct CISpellDictWord
{
CSphString m_sWord;
CSphString m_sFlags;
};
bool Load ( const char * szFilename );
void IterateStart ();
const CISpellDictWord * IterateNext ();
private:
CSphVector < CISpellDictWord > m_dEntries;
int m_iIterator = 0;
};
bool CISpellDict::Load ( const char * szFilename )
{
if ( !szFilename )
return false;
m_dEntries.Reset ();
m_dEntries.Reserve ( 131072 );
FILE * pFile = fopen ( szFilename, "rt" );
if ( !pFile )
return false;
char szWordBuffer [MAX_STR_LENGTH];
while ( !feof ( pFile ) )
{
char * szResult = fgets ( szWordBuffer, MAX_STR_LENGTH, pFile );
if ( !szResult )
break;
auto iPos = (int) strlen ( szWordBuffer ) - 1;
while ( iPos>=0 && isspace ( (unsigned char)szWordBuffer[iPos] ) )
szWordBuffer [iPos--] = '\0';
CISpellDictWord Word;
char * szPosition = strchr ( szWordBuffer, '/' );
if ( !szPosition )
{
szPosition = szWordBuffer;
while ( *szPosition && !isspace ( (unsigned char)*szPosition ) )
++szPosition;
*szPosition = '\0';
Word.m_sWord = szWordBuffer;
} else
{
*szPosition = '\0';
Word.m_sWord = szWordBuffer;
++szPosition;
char * szFlags = szPosition;
while ( *szPosition && !isspace ( (unsigned char)*szPosition ) )
++szPosition;
*szPosition = '\0';
Word.m_sFlags = szFlags;
}
m_dEntries.Add ( Word );
}
fclose ( pFile );
return true;
}
void CISpellDict::IterateStart ()
{
m_iIterator = 0;
}
const CISpellDict::CISpellDictWord * CISpellDict::IterateNext ()
{
if ( m_iIterator>=m_dEntries.GetLength() )
return nullptr;
return &m_dEntries [m_iIterator++];
}
//////////////////////////////////////////////////////////////////////////
enum RuleType_e
{
RULE_NONE,
RULE_PREFIXES,
RULE_SUFFIXES
};
class CISpellAffixRule
{
public:
CISpellAffixRule () = default;
CISpellAffixRule ( RuleType_e eRule, char cFlag, bool bCrossProduct, char * szCondition, char * szStrip, char * szAppend );
bool Apply ( CSphString & sWord );
char Flag () const;
bool IsCrossProduct () const;
bool IsPrefix () const;
private:
RuleType_e m_eRule { RULE_NONE };
char m_cFlag {0};
bool m_bCrossProduct = false;
CSphString m_sCondition;
CSphString m_sStrip;
CSphString m_sAppend;
int m_iWordLen = 0;
int m_iCondLen = 0;
int m_iStripLen = 0;
int m_iAppendLen = 0;
bool CheckSuffix ( const CSphString & sWord ) const;
bool CheckPrefix ( const CSphString & sWord ) const;
bool StripAppendSuffix ( CSphString & sWord ) const;
bool StripAppendPrefix ( CSphString & sWord ) const;
};
CISpellAffixRule::CISpellAffixRule ( RuleType_e eRule, char cFlag, bool bCrossProduct, char * szCondition, char * szStrip, char * szAppend )
: m_eRule ( eRule )
, m_cFlag ( cFlag )
, m_bCrossProduct ( bCrossProduct )
, m_sCondition ( szCondition )
, m_sStrip ( szStrip )
, m_sAppend ( szAppend )
, m_iWordLen ( 0 )
{
m_iCondLen = szCondition ? (int) strlen ( szCondition ) : 0;
m_iStripLen = szStrip ? (int) strlen ( szStrip ) : 0;
m_iAppendLen = szAppend ? (int) strlen ( szAppend ) : 0;
}
bool CISpellAffixRule::Apply ( CSphString & sWord )
{
if ( m_sCondition.IsEmpty () )
return true;
if ( sWord.IsEmpty () )
return false;
m_iWordLen = (int) strlen ( sWord.cstr () );
bool bDotCond = ( m_sCondition=="." );
if ( m_eRule==RULE_SUFFIXES )
{
if ( !bDotCond && !CheckSuffix ( sWord ) )
return false;
if ( !StripAppendSuffix ( sWord ) )
return false;
} else
{
if ( !bDotCond && !CheckPrefix ( sWord ) )
return false;
if ( !StripAppendPrefix ( sWord ) )
return false;
}
return true;
}
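// Worked example (illustrative rules, not from a real .aff file): a suffix
// rule { condition ".", strip "", append "s" } maps "cat" -> "cats" (the
// dot condition matches any word, so CheckSuffix is skipped); a rule
// { condition "y", strip "y", append "ies" } maps "city" -> "cities".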
bool CISpellAffixRule::CheckSuffix ( const CSphString & sWord ) const
{
int iCondI = m_iCondLen-1;
for ( int i=m_iWordLen-1; iCondI>=0 && i>=0; --i )
{
if ( m_sCondition.cstr()[iCondI]=='.' )
{
--iCondI;
} else if ( m_sCondition.cstr()[iCondI]!=']' )
{
if ( m_sCondition.cstr()[iCondI]!=sWord.cstr()[i] )
return false;
--iCondI;
} else
{
int iRangeStart = -1;
for ( int j=iCondI; j>=0 && iRangeStart==-1; --j )
if ( m_sCondition.cstr()[j]=='[' )
iRangeStart = j;
if ( iRangeStart==-1 )
return false;
else
{
if ( !IsInSet ( sWord.cstr () [i], m_sCondition.SubString ( iRangeStart + 1, iCondI - iRangeStart - 1 ).cstr () ) )
return false;
iCondI = iRangeStart - 1;
}
}
}
return true;
}
bool CISpellAffixRule::StripAppendSuffix ( CSphString & sWord ) const
{
static char szTmp [ MAX_STR_LENGTH];
if ( !m_sStrip.IsEmpty () )
{
if ( m_iWordLen < m_iStripLen )
return false;
if ( strncmp ( sWord.cstr () + m_iWordLen - m_iStripLen, m_sStrip.cstr (), m_iStripLen ) )
return false;
}
strncpy ( szTmp, sWord.cstr (), m_iWordLen - m_iStripLen );
szTmp [m_iWordLen - m_iStripLen] = '\0';
if ( !m_sAppend.IsEmpty () )
strncat ( szTmp, m_sAppend.cstr (), m_iAppendLen );
sWord = szTmp;
return true;
}
bool CISpellAffixRule::CheckPrefix ( const CSphString & sWord ) const
{
int iCondI = 0;
for ( int i = 0; iCondI < m_iCondLen && i < m_iWordLen; ++i )
{
if ( m_sCondition.cstr()[iCondI]!='[' )
{
if ( m_sCondition.cstr()[iCondI]!=sWord.cstr()[i] )
return false;
++iCondI;
} else
{
int iRangeEnd = -1;
for ( int j=iCondI; j<m_iCondLen && iRangeEnd==-1; ++j )
if ( m_sCondition.cstr()[j]==']' )
iRangeEnd = j;
if ( iRangeEnd==-1 )
return false;
else
{
if ( !IsInSet ( sWord.cstr () [i], m_sCondition.SubString ( iCondI + 1, iRangeEnd - iCondI - 1 ).cstr () ) )
return false;
}
}
}
return true;
}
bool CISpellAffixRule::StripAppendPrefix ( CSphString & sWord ) const
{
static char szTmp [MAX_STR_LENGTH];
if ( !m_sStrip.IsEmpty () )
{
const char * Pos = strstr ( sWord.cstr (), m_sStrip.cstr () );
if ( Pos!=sWord.cstr() )
return false;
}
if ( !m_sAppend.IsEmpty () )
strncpy ( szTmp, m_sAppend.cstr(), m_iAppendLen );
strncpy ( szTmp + m_iAppendLen, sWord.cstr () + m_iStripLen, m_iWordLen - m_iStripLen );
szTmp [m_iWordLen - m_iStripLen + m_iAppendLen] = '\0';
sWord = szTmp;
return true;
}
char CISpellAffixRule::Flag () const
{
return m_cFlag;
}
bool CISpellAffixRule::IsCrossProduct () const
{
return m_bCrossProduct;
}
bool CISpellAffixRule::IsPrefix () const
{
return m_eRule==RULE_PREFIXES;
}
//////////////////////////////////////////////////////////////////////////
enum AffixFormat_e
{
AFFIX_FORMAT_ISPELL = 0,
AFFIX_FORMAT_MYSPELL = 1,
AFFIX_FORMAT_UNKNOWN
};
static const char * AffixFormatName[] =
{
"ISpell",
"MySpell"
};
class CISpellAffix
{
public:
CISpellAffix ( const char * szLocale, const char * szCharsetFile );
bool Load ( const char * szFilename );
CISpellAffixRule * GetRule ( int iRule );
int GetNumRules () const;
bool CheckCrosses () const;
private:
CSphVector < CISpellAffixRule > m_dRules;
char m_dCharset [256] {0};
bool m_bFirstCaseConv = true;
CSphString m_sLocale;
CSphString m_sCharsetFile;
bool m_bCheckCrosses = false;
LowercaserRefcountedPtr m_pLowerCaser;
bool m_bUseDictConversion = false;
bool AddToCharset ( char * szRangeL, char * szRangeU );
void AddCharPair ( BYTE uCharL, BYTE uCharU );
void Strip ( char * szText );
char ToLowerCase ( char cChar );
void LoadLocale ();
AffixFormat_e DetectFormat ( FILE * );
bool LoadISpell ( FILE * );
bool LoadMySpell ( FILE * );
};
CISpellAffix::CISpellAffix ( const char * szLocale, const char * szCharsetFile )
: m_sLocale ( szLocale )
, m_sCharsetFile ( szCharsetFile )
{
}
AffixFormat_e CISpellAffix::DetectFormat ( FILE * pFile )
{
char sBuffer [MAX_STR_LENGTH];
while ( !feof ( pFile ) )
{
char * sLine = fgets ( sBuffer, MAX_STR_LENGTH, pFile );
if ( !sLine )
break;
if ( !strncmp ( sLine, "SFX", 3 ) ) return AFFIX_FORMAT_MYSPELL;
if ( !strncmp ( sLine, "PFX", 3 ) ) return AFFIX_FORMAT_MYSPELL;
if ( !strncmp ( sLine, "REP", 3 ) ) return AFFIX_FORMAT_MYSPELL;
if ( !strncasecmp ( sLine, "prefixes", 8 ) ) return AFFIX_FORMAT_ISPELL;
if ( !strncasecmp ( sLine, "suffixes", 8 ) ) return AFFIX_FORMAT_ISPELL;
if ( !strncasecmp ( sLine, "flag", 4 ) ) return AFFIX_FORMAT_ISPELL;
}
return AFFIX_FORMAT_UNKNOWN;
}
bool CISpellAffix::Load ( const char * szFilename )
{
if ( !szFilename )
return false;
m_dRules.Reset ();
memset ( m_dCharset, 0, sizeof ( m_dCharset ) );
m_bFirstCaseConv = true;
m_pLowerCaser = nullptr;
m_bUseDictConversion = false;
FILE * pFile = fopen ( szFilename, "rt" );
if ( !pFile )
return false;
bool bResult = false;
AffixFormat_e eFormat = DetectFormat ( pFile );
if ( eFormat==AFFIX_FORMAT_UNKNOWN )
printf ( "Failed to detect affix file format\n" );
else
{
fseek ( pFile, 0, SEEK_SET ); // args are (offset, whence); the old reversed order only worked because SEEK_SET==0
printf ( "Using %s affix file format\n", AffixFormatName[eFormat] );
if ( eFormat==AFFIX_FORMAT_MYSPELL )
bResult = LoadMySpell ( pFile );
else // if ( eFormat==AFFIX_FORMAT_ISPELL )
bResult = LoadISpell ( pFile );
}
fclose ( pFile );
bool bHaveCrossPrefix = false;
for ( int i = 0; i < m_dRules.GetLength () && !bHaveCrossPrefix; i++ )
if ( m_dRules[i].IsPrefix() && m_dRules[i].IsCrossProduct() )
bHaveCrossPrefix = true;
bool bHaveCrossSuffix = false;
for ( int i = 0; i < m_dRules.GetLength () && !bHaveCrossSuffix; i++ )
if ( !m_dRules[i].IsPrefix() && m_dRules[i].IsCrossProduct() )
bHaveCrossSuffix = true;
m_bCheckCrosses = bHaveCrossPrefix && bHaveCrossSuffix;
return bResult;
}
bool CISpellAffix::LoadISpell ( FILE * pFile )
{
char szBuffer [ MAX_STR_LENGTH ];
char szCondition [ MAX_STR_LENGTH ];
char szStrip [ MAX_STR_LENGTH ];
char szAppend [ MAX_STR_LENGTH ];
RuleType_e eRule = RULE_NONE;
char cFlag = '\0';
bool bCrossProduct = false;
int iLine = 0;
// TODO: parse all .aff character replacement commands
while ( !feof ( pFile ) )
{
char * szResult = fgets ( szBuffer, MAX_STR_LENGTH, pFile );
if ( !szResult )
break;
iLine++;
if ( !strncasecmp ( szBuffer, "prefixes", 8 ) )
{
eRule = RULE_PREFIXES;
continue;
}
if ( !strncasecmp ( szBuffer, "suffixes", 8 ) )
{
eRule = RULE_SUFFIXES;
continue;
}
if ( !strncasecmp ( szBuffer, "wordchars", 9 ) )
{
char * szStart = szBuffer + 9;
while ( *szStart && isspace ( (unsigned char) *szStart ) )
++szStart;
char * szRangeL = szStart;
while ( *szStart && !isspace ( (unsigned char) *szStart ) )
++szStart;
if ( !*szStart )
{
printf ( "WARNING: Line %d: invalid 'wordchars' statement\n", iLine );
continue;
}
*szStart = '\0';
++szStart;
while ( *szStart && isspace ( (unsigned char) *szStart ) )
++szStart;
char * szRangeU = szStart;
while ( *szStart && !isspace ( (unsigned char) *szStart ) )
++szStart;
*szStart = '\0';
if ( !AddToCharset ( szRangeL, szRangeU ) )
printf ( "WARNING: Line %d: cannot add to charset: '%s' '%s'\n", iLine, szRangeL, szRangeU );
continue;
}
if ( !strncasecmp ( szBuffer, "flag", 4 ) )
{
if ( eRule==RULE_NONE )
{
printf ( "WARNING: Line %d: 'flag' appears before preffixes or suffixes\n", iLine );
continue;
}
char * szStart = szBuffer + 4;
while ( *szStart && isspace ( (unsigned char) *szStart ) )
++szStart;
bCrossProduct = ( *szStart=='*' );
cFlag = bCrossProduct ? *(szStart + 1) : *(szStart);
continue;
}
if ( eRule==RULE_NONE )
continue;
char * szComment = strchr ( szBuffer, '#' );
if ( szComment )
*szComment = '\0';
if ( !* szBuffer )
continue;
szCondition[0] = '\0';
szStrip[0] = '\0';
szAppend[0] = '\0';
int nFields = sscanf ( szBuffer, "%[^>\n]>%[^,\n],%[^\n]", szCondition, szStrip, szAppend ); // NOLINT
Strip ( szCondition );
Strip ( szStrip );
Strip ( szAppend );
switch ( nFields )
{
case 2: // no optional strip-string
strcpy ( szAppend, szStrip ); // NOLINT
szStrip[0] = '\0';
break;
case 3: // all read
break;
default: // invalid repl
continue;
}
CISpellAffixRule Rule ( eRule, cFlag, bCrossProduct, szCondition, szStrip, szAppend );
m_dRules.Add ( Rule );
}
return true;
}
bool CISpellAffix::LoadMySpell ( FILE * pFile )
{
char sBuffer [MAX_STR_LENGTH];
char sCondition [MAX_STR_LENGTH];
char sRemove [MAX_STR_LENGTH];
char sAppend [MAX_STR_LENGTH];
RuleType_e eRule = RULE_NONE;
BYTE cFlag = 0;
BYTE cCombine = 0;
int iCount = 0, iLine = 0;
const char * sMode = nullptr;
while ( !feof ( pFile ) )
{
char * sLine = fgets ( sBuffer, MAX_STR_LENGTH, pFile );
if ( !sLine )
break;
++iLine;
// prefix and suffix rules
RuleType_e eNewRule = RULE_NONE;
if ( !strncmp ( sLine, "PFX", 3 ) )
{
eNewRule = RULE_PREFIXES;
sMode = "prefix";
} else if ( !strncmp ( sLine, "SFX", 3 ) )
{
eNewRule = RULE_SUFFIXES;
sMode = "suffix";
}
if ( eNewRule!=RULE_NONE )
{
sLine += 3;
while ( *sLine && isspace ( (unsigned char) *sLine ) )
++sLine;
if ( eNewRule!=eRule ) // new rule header
{
if ( iCount )
printf ( "WARNING: Line %d: Premature end of entries.\n", iLine );
if ( sscanf ( sLine, "%c %c %d", &cFlag, &cCombine, &iCount )!=3 ) // NOLINT
printf ( "WARNING; Line %d: Malformed %s header\n", iLine, sMode );
eRule = eNewRule;
} else // current rule continued
{
*sRemove = *sAppend = 0;
char cNewFlag;
if ( sscanf ( sLine, "%c %s %s %s", &cNewFlag, sRemove, sAppend, sCondition )==4 ) // NOLINT
{
if ( cNewFlag!=cFlag )
printf ( "WARNING: Line %d: Flag character mismatch\n", iLine );
if ( *sRemove=='0' && *(sRemove + 1)==0 ) *sRemove = 0;
if ( *sAppend=='0' && *(sAppend + 1)==0 ) *sAppend = 0;
CISpellAffixRule Rule ( eRule, cFlag, cCombine=='Y', sCondition, sRemove, sAppend );
m_dRules.Add ( Rule );
} else
printf ( "WARNING: Line %d: Malformed %s rule\n", iLine, sMode );
if ( !--iCount ) eRule = RULE_NONE;
}
continue;
}
}
return true;
}
CISpellAffixRule * CISpellAffix::GetRule ( int iRule )
{
return &m_dRules [iRule];
}
int CISpellAffix::GetNumRules () const
{
return m_dRules.GetLength ();
}
bool CISpellAffix::CheckCrosses () const
{
return m_bCheckCrosses;
}
bool CISpellAffix::AddToCharset ( char * szRangeL, char * szRangeU )
{
if ( !szRangeL || !szRangeU )
return false;
auto iLengthL = (int) strlen ( szRangeL );
auto iLengthU = (int)strlen ( szRangeU );
bool bSetL = ( iLengthL>0 && szRangeL[0]=='[' && szRangeL[iLengthL-1]==']' );
bool bSetR = ( iLengthU>0 && szRangeU[0]=='[' && szRangeU[iLengthU-1]==']' );
if ( bSetL!=bSetR )
return false;
if ( bSetL )
{
szRangeL [iLengthL - 1] = '\0';
szRangeL = szRangeL + 1;
szRangeU [iLengthU - 1] = '\0';
szRangeU = szRangeU + 1;
BYTE uMinL, uMaxL;
if ( !GetSetMinMax ( szRangeL, uMinL, uMaxL ) )
return false;
BYTE uMinU, uMaxU;
if ( !GetSetMinMax ( szRangeU, uMinU, uMaxU ) )
return false;
if ( ( uMaxU - uMinU )!=( uMaxL - uMinL ) )
return false;
for ( BYTE i=0; i<=( uMaxL - uMinL ); ++i )
if ( IsInSet ( uMinL + i, szRangeL ) && IsInSet ( uMinU + i, szRangeU ) )
AddCharPair ( uMinL + i, uMinU + i );
} else
{
if ( iLengthL > 4 || iLengthU > 4 )
return false;
const char * szL = szRangeL;
const char * szU = szRangeU;
AddCharPair ( GetWordchar(szL), GetWordchar(szU) );
}
m_bUseDictConversion = true;
return true;
}
void CISpellAffix::AddCharPair ( BYTE uCharL, BYTE uCharU )
{
m_dCharset [uCharU] = uCharL;
}
void CISpellAffix::Strip ( char * szText )
{
char * szIterator1 = szText;
char * szIterator2 = szText;
while ( *szIterator1 )
{
if ( !isspace ( (unsigned char) *szIterator1 ) && *szIterator1!='-' )
{
*szIterator2 = *szIterator1;
++szIterator2;
}
++szIterator1;
}
*szIterator2 = '\0';
while ( *szText )
{
*szText = ToLowerCase ( *szText );
++szText;
}
}
char CISpellAffix::ToLowerCase ( char cChar )
{
if ( m_bFirstCaseConv )
{
LoadLocale ();
m_bFirstCaseConv = false;
}
// dictionary conversion
if ( m_bUseDictConversion )
return m_dCharset [(BYTE) cChar] ? m_dCharset [(BYTE) cChar] : cChar;
// user-defined character mapping
if ( m_pLowerCaser )
{
auto cResult = (char)m_pLowerCaser->ToLower ( (BYTE) cChar );
return cResult ? cResult : cChar;
}
// user-specified code page conversion
return (char)tolower ( (BYTE)cChar ); // workaround for systems (eg. FreeBSD) which default to signed char. marvelous!
}
void CISpellAffix::LoadLocale ()
{
if ( m_bUseDictConversion )
printf ( "Using dictionary-defined character set\n" );
else
if ( !m_sCharsetFile.IsEmpty () )
{
FILE * pFile = fopen ( m_sCharsetFile.cstr (), "rt" );
if ( pFile )
{
printf ( "Using character set from '%s'\n", m_sCharsetFile.cstr () );
const int MAX_CHARSET_LENGTH = 4096;
char szBuffer [MAX_CHARSET_LENGTH];
char * szResult = fgets ( szBuffer, MAX_CHARSET_LENGTH, pFile );
if ( szResult )
{
CSphVector<CSphRemapRange> dRemaps;
if ( sphParseCharset ( szBuffer, dRemaps ) )
{
m_pLowerCaser = new CSphLowercaser;
m_pLowerCaser->AddRemaps ( dRemaps );
} else
{
printf ( "Failed to parse charset from '%s'\n", m_sCharsetFile.cstr() );
}
} else
{
printf ( "Failed to read charset from '%s'\n", m_sCharsetFile.cstr() );
}
fclose ( pFile );
} else
{
printf ( "Failed to open '%s'\n", m_sCharsetFile.cstr() );
}
} else
{
if ( !m_sLocale.IsEmpty () )
{
char dLocaleC[256], dLocaleUser[256];
setlocale ( LC_ALL, "C" );
for ( int i=0; i<256; i++ )
dLocaleC[i] = (char) tolower(i);
char * szLocale = setlocale ( LC_CTYPE, m_sLocale.cstr() );
if ( szLocale )
{
printf ( "Using user-defined locale (locale=%s)\n", m_sLocale.cstr() );
for ( int i=0; i<256; i++ )
dLocaleUser[i] = (char) tolower(i);
if ( !memcmp ( dLocaleC, dLocaleUser, 256 ) )
printf ( "WARNING: user-defined locale provides the same case conversion as the default \"C\" locale\n" );
} else
printf ( "WARNING: could not set user-defined locale for case conversions (locale=%s)\n", m_sLocale.cstr() );
} else
printf ( "WARNING: no character set specified\n" );
}
}
//////////////////////////////////////////////////////////////////////////
enum OutputMode_e
{
M_DEBUG,
M_DUPLICATES,
M_LAST,
M_EXACT_OR_LONGEST,
M_DEFAULT = M_EXACT_OR_LONGEST
};
static const char * dModeName[] =
{
"debug",
"duplicates",
"last"
};
struct MapInfo_t
{
CSphString m_sWord;
char m_sRules[3] {0};
};
struct WordLess
{
inline bool IsLess ( const char * a, const char * b ) const
{
return strcoll ( a, b ) < 0;
}
};
typedef CSphOrderedHash < CSphVector<MapInfo_t>, CSphString, CSphStrHashFunc, 100000 > WordMap_t;
static void EmitResult ( WordMap_t & tMap , const CSphString & sFrom, const CSphString & sTo, char cRuleA = 0, char cRuleB = 0 )
{
if ( !tMap.Exists(sFrom) )
tMap.Add ( CSphVector<MapInfo_t>(), sFrom );
MapInfo_t tInfo;
tInfo.m_sWord = sTo;
tInfo.m_sRules[0] = cRuleA;
tInfo.m_sRules[1] = cRuleB;
tInfo.m_sRules[2] = 0;
tMap[sFrom].Add ( tInfo );
}
int main ( int iArgs, char ** dArgs )
{
OutputMode_e eMode = M_DEFAULT;
bool bUseCustomCharset = false;
CSphString sDict, sAffix, sLocale, sCharsetFile, sResult = "result.txt";
printf ( "spelldump, an ispell dictionary dumper\n\n" );
int i = 1;
for ( ; i < iArgs; i++ )
{
if ( !strcmp ( dArgs[i], "-c" ) )
{
if ( ++i==iArgs ) break;
bUseCustomCharset = true;
sCharsetFile = dArgs[i];
} else if ( !strcmp ( dArgs[i], "-m" ) )
{
if ( ++i==iArgs ) break;
char * sMode = dArgs[i];
if ( !strcmp ( sMode, "debug" ) ) { eMode = M_DEBUG; continue; }
if ( !strcmp ( sMode, "duplicates" ) ) { eMode = M_DUPLICATES; continue; }
if ( !strcmp ( sMode, "last" ) ) { eMode = M_LAST; continue; }
if ( !strcmp ( sMode, "default" ) ) { eMode = M_DEFAULT; continue; }
printf ( "Unrecognized mode: %s\n", sMode );
return 1;
} else
break;
}
switch ( iArgs - i )
{
case 4:
sLocale = dArgs[i + 3];
// [[clang::fallthrough]];
case 3:
sResult = dArgs[i + 2];
// [[clang::fallthrough]];
case 2:
sAffix = dArgs[i + 1];
sDict = dArgs[i];
break;
default:
printf ( "Usage: spelldump [options] <dictionary> <affix> [result] [locale-name]\n\n"
"Options:\n"
"-c <file>\tuse case conversion defined in <file>\n"
"-m <mode>\toutput (conflict resolution) mode:\n"
"\t\tdefault - try to guess the best way to resolve a conflict\n"
"\t\tlast - choose last entry\n"
"\t\tdebug - dump all mappings (with rules)\n"
"\t\tduplicates - dump duplicate mappings only (with rules)\n" );
if ( iArgs>1 )
{
printf ( "\n"
"Examples:\n"
"spelldump en.dict en.aff\n"
"spelldump ru.dict ru.aff ru.txt ru_RU.CP1251\n"
"spelldump ru.dict ru.aff ru.txt .1251\n" );
}
return 1;
}
printf ( "Loading dictionary...\n" );
CISpellDict Dict;
if ( !Dict.Load ( sDict.cstr () ) )
sphDie ( "Error loading dictionary file '%s'\n", sDict.IsEmpty () ? "" : sDict.cstr () );
printf ( "Loading affix file...\n" );
CISpellAffix Affix ( sLocale.cstr (), bUseCustomCharset ? sCharsetFile.cstr () : nullptr );
if ( !Affix.Load ( sAffix.cstr () ) )
sphDie ( "Error loading affix file '%s'\n", sAffix.IsEmpty () ? "" : sAffix.cstr () );
if ( sResult.IsEmpty () )
sphDie ( "No result file specified\n" );
FILE * pFile = fopen ( sResult.cstr (), "wt" );
if ( !pFile )
sphDie ( "Unable to open '%s' for writing\n", sResult.cstr () );
if ( eMode!=M_DEFAULT )
printf ( "Output mode: %s\n", dModeName[eMode] );
Dict.IterateStart ();
WordMap_t tWordMap;
const CISpellDict::CISpellDictWord * pWord = nullptr;
int nDone = 0;
while ( ( pWord = Dict.IterateNext () )!=nullptr )
{
EmitResult ( tWordMap, pWord->m_sWord, pWord->m_sWord );
if ( ( ++nDone % 10 )==0 )
{
printf ( "\rDictionary words processed: %d", nDone );
fflush ( stdout );
}
if ( pWord->m_sFlags.IsEmpty() )
continue;
CSphString sWord, sWordForCross;
auto iFlagLen = (int) strlen ( pWord->m_sFlags.cstr () );
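// for every flag on the word, apply each matching affix rule to produce
// a derived-form -> stem mapping; when cross-products are allowed, also
// combine a prefix rule with a suffix rule (one of each) on the same word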
for ( int iFlag1 = 0; iFlag1 < iFlagLen; ++iFlag1 )
for ( int iRule1 = 0; iRule1 < Affix.GetNumRules (); ++iRule1 )
{
CISpellAffixRule * pRule1 = Affix.GetRule ( iRule1 );
if ( pRule1->Flag()!=pWord->m_sFlags.cstr()[iFlag1] )
continue;
sWord = pWord->m_sWord;
if ( !pRule1->Apply ( sWord ) )
continue;
EmitResult ( tWordMap, sWord, pWord->m_sWord, pRule1->Flag() );
// apply other rules
if ( !Affix.CheckCrosses() )
continue;
if ( !pRule1->IsCrossProduct() )
continue;
for ( int iFlag2 = iFlag1 + 1; iFlag2 < iFlagLen; ++iFlag2 )
for ( int iRule2 = 0; iRule2 < Affix.GetNumRules (); ++iRule2 )
{
CISpellAffixRule * pRule2 = Affix.GetRule ( iRule2 );
if ( !pRule2->IsCrossProduct () || pRule2->Flag()!=pWord->m_sFlags.cstr()[iFlag2] ||
pRule2->IsPrefix()==pRule1->IsPrefix() )
continue;
sWordForCross = sWord;
if ( pRule2->Apply ( sWordForCross ) )
EmitResult ( tWordMap, sWordForCross, pWord->m_sWord, pRule1->Flag(), pRule2->Flag() );
}
}
}
printf ( "\rDictionary words processed: %d\n", nDone );
// output
CSphVector<const char *> dKeys;
for ( const auto& tWord : tWordMap )
dKeys.Add ( tWord.first.cstr() );
dKeys.Sort ( WordLess() );
ARRAY_FOREACH ( iKey, dKeys )
{
const CSphVector<MapInfo_t> & dWords = tWordMap[dKeys[iKey]];
const char * sKey = dKeys[iKey];
switch ( eMode )
{
case M_LAST:
fprintf ( pFile, "%s > %s\n", sKey, dWords.Last().m_sWord.cstr() );
break;
case M_EXACT_OR_LONGEST:
{
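// prefer an exact mapping (the key maps to itself);
// otherwise fall back to the longest candidate stem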
int iMatch = 0;
int iLength = 0;
ARRAY_FOREACH ( iWord, dWords )
{
if ( dWords[iWord].m_sWord==sKey )
{
iMatch = iWord;
break;
}
auto iWordLength = (int)strlen ( dWords[iWord].m_sWord.cstr() );
if ( iWordLength>iLength )
{
iLength = iWordLength;
iMatch = iWord;
}
}
fprintf ( pFile, "%s > %s\n", sKey, dWords[iMatch].m_sWord.cstr() );
break;
}
case M_DUPLICATES:
if ( dWords.GetLength()==1 )
break;
// [[clang::fallthrough]];
case M_DEBUG:
ARRAY_FOREACH ( iWord, dWords )
fprintf ( pFile, "%s > %s %s/%d\n", sKey, dWords[iWord].m_sWord.cstr(), dWords[iWord].m_sRules, dWords.GetLength() );
break;
}
}
fclose ( pFile );
return 0;
}
| 26,700 | C++ | .cpp | 929 | 25.530678 | 140 | 0.631604 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | true | false |

16,850 | stackmock.cpp | manticoresoftware_manticoresearch/src/stackmock.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "stackmock.h"
#include "sphinxexpr.h"
#include "coro_stack.h"
#include "coroutine.h"
#include "searchdsql.h"
#include "attribute.h"
#include "querycontext.h"
// hard-coded definitions to avoid probing (that is - to avoid confusing memcheck programs)
// run searchd with --logdebug --console once, read values, then write them here and uncomment these lines
//#define KNOWN_CREATE_SIZE 4208
//#define KNOWN_EXPR_SIZE 48
//#define KNOWN_FILTER_SIZE 400
class StackMeasurer_c
{
protected:
CSphFixedVector<BYTE> m_dMockStack { (int) Threads::DEFAULT_CORO_STACK_SIZE };
int m_iComplexity;
protected:
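// the coro stack grows downwards from the buffer's end: scan from the start
// for the first byte that lost the filler pattern and report the distance
// from there to the end, i.e. the deepest stack usage observed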
int CalcUsedStackEdge ( BYTE uFiller )
{
ARRAY_CONSTFOREACH ( i, m_dMockStack )
if ( m_dMockStack[i]!=uFiller )
return m_dMockStack.GetLength ()-i;
return m_dMockStack.GetLength ();
}
void MockInitMem ( BYTE uFiller )
{
::memset ( m_dMockStack.begin (), uFiller, m_dMockStack.GetLengthBytes () );
}
int MeasureStackWithPattern ( BYTE uPattern )
{
MockInitMem ( uPattern );
MockParseTest ();
auto iUsedStackEdge = CalcUsedStackEdge ( uPattern );
return sphRoundUp ( iUsedStackEdge, 4 );
}
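// measure twice with two different filler bytes and take the maximum:
// a genuine stack write may coincidentally equal one filler and hide usage,
// but it is unlikely to match both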
int MeasureStack ()
{
auto iStartStackDE = MeasureStackWithPattern ( 0xDE );
auto iStartStackAD = MeasureStackWithPattern ( 0xAD );
return Max ( iStartStackDE, iStartStackAD );
}
virtual void BuildMockExpr ( int iComplexity ) = 0;
virtual void MockParseTest () = 0;
void BuildMockExprWrapper ( int iComplexity )
{
m_iComplexity = iComplexity + 1;
BuildMockExpr ( iComplexity );
}
public:
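// returns { per-node frame size, baseline stack usage }: measure a trivial
// expression first, grow complexity until the stack delta becomes visible,
// then add iNodes more frames and amortize the growth across them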
std::pair<int,int> MockMeasureStack ( int iNodes )
{
BuildMockExprWrapper ( 0 );
int iEmptyVal = MeasureStack ();
int iDelta = 0;
// find the point where the expression's size becomes visible on the stack
// (we need a fairly big expression to touch the deepest part of the stack)
int iHeight = 0;
while ( iDelta<=0 )
{
++iHeight;
BuildMockExprWrapper ( iHeight );
auto iCurStack = MeasureStack ();
iDelta = iCurStack - iEmptyVal;
}
auto iStartStack = iEmptyVal + iDelta;
// add iNodes more frames and average the per-frame stack growth
BuildMockExprWrapper ( iHeight + iNodes );
auto iCurStack = MeasureStack ();
iDelta = iCurStack-iStartStack;
iDelta/=iNodes;
iDelta = sphRoundUp ( iDelta, 16 );
return { iDelta, iEmptyVal };
}
virtual ~StackMeasurer_c () = default;
};
/////////////////////////////////////////////////////////////////////
/// calculate stack for expressions
class CreateExprStackSize_c : public StackMeasurer_c
{
void BuildMockExpr ( int iComplexity ) final
{
m_sExpr.Clear();
m_sExpr << "((attr_a=0)*1)";
for ( int i = 1; i<iComplexity+1; ++i ) // ((attr_a=0)*1) + ((attr_b=1)*3) + ((attr_b=2)*5) + ...
m_sExpr << "+((attr_b=" << i << ")*" << i * 2+1 << ")";
}
void MockParseTest () override
{
struct
{
ExprParseArgs_t m_tArgs;
CSphString m_sError;
CSphSchema m_tSchema;
const char * m_sExpr = nullptr;
bool m_bSuccess = false;
ISphExpr * m_pExprBase = nullptr;
} tParams;
CSphColumnInfo tAttr;
tAttr.m_eAttrType = SPH_ATTR_INTEGER;
tAttr.m_sName = "attr_a";
tParams.m_tSchema.AddAttr ( tAttr, false );
tAttr.m_sName = "attr_b";
tParams.m_tSchema.AddAttr ( tAttr, false );
tParams.m_sExpr = m_sExpr.cstr();
Threads::MockCallCoroutine ( m_dMockStack, [&tParams] {
tParams.m_pExprBase = sphExprParse ( tParams.m_sExpr, tParams.m_tSchema, nullptr, tParams.m_sError, tParams.m_tArgs );
} );
tParams.m_bSuccess = !!tParams.m_pExprBase;
SafeRelease ( tParams.m_pExprBase );
if ( !tParams.m_bSuccess || !tParams.m_sError.IsEmpty () )
sphWarning ( "stack check expression error: %s", tParams.m_sError.cstr () );
}
protected:
StringBuilder_c m_sExpr;
public:
static std::pair<int, int> MockMeasure();
static void PublishValue ( std::pair<int, int> tStack);
};
// measure stack for evaluate expression
class EvalExprStackSize_c : public CreateExprStackSize_c
{
void MockParseTest () override
{
struct
{
ExprParseArgs_t m_tArgs;
CSphString m_sError;
CSphSchema m_tSchema;
const char * m_sExpr = nullptr;
bool m_bSuccess = false;
ISphExpr * m_pExprBase = nullptr;
CSphMatch m_tMatch;
} tParams;
CSphColumnInfo tAttr;
tAttr.m_eAttrType = SPH_ATTR_INTEGER;
tAttr.m_sName = "attr_a";
tParams.m_tSchema.AddAttr ( tAttr, false );
tAttr.m_sName = "attr_b";
tParams.m_tSchema.AddAttr ( tAttr, false );
CSphFixedVector<CSphRowitem> dRow { tParams.m_tSchema.GetRowSize () };
auto * pRow = dRow.Begin();
for ( int i = 1; i<tParams.m_tSchema.GetAttrsCount (); ++i )
sphSetRowAttr ( pRow, tParams.m_tSchema.GetAttr ( i ).m_tLocator, i );
sphSetRowAttr ( pRow, tParams.m_tSchema.GetAttr ( 0 ).m_tLocator, 123 );
tParams.m_tMatch.m_tRowID = 123;
tParams.m_tMatch.m_iWeight = 456;
tParams.m_tMatch.m_pStatic = pRow;
tParams.m_sExpr = m_sExpr.cstr();
{ // parse in a dedicated coro (hopefully 100K of frame per level fits any arch)
CSphFixedVector<BYTE> dSafeStack { m_iComplexity * 100 * 1024 };
Threads::MockCallCoroutine ( dSafeStack, [&tParams] { // run in a coro, since a fat expression may already require a dedicated stack
tParams.m_pExprBase = sphExprParse ( tParams.m_sExpr, tParams.m_tSchema, nullptr, tParams.m_sError, tParams.m_tArgs );
});
}
tParams.m_bSuccess = !!tParams.m_pExprBase;
assert ( tParams.m_pExprBase );
Threads::MockCallCoroutine ( m_dMockStack, [&tParams] {
tParams.m_pExprBase->Eval ( tParams.m_tMatch );
} );
if ( !tParams.m_bSuccess || !tParams.m_sError.IsEmpty () )
sphWarning ( "stack check expression error: %s", tParams.m_sError.cstr () );
}
public:
static std::pair<int, int> MockMeasure();
static void PublishValue ( std::pair<int, int> tStack );
};
/////////////////////////////////////////////////////////////////////
class FilterCreationMeasureStack_c : public StackMeasurer_c
{
void BuildMockExpr ( int iComplexity ) final
{
m_sQuery.Clear ();
m_sQuery << "select * from test where id between 1 and 10";
for ( int i = 0; i<iComplexity; i++ )
m_sQuery << " OR id between 1 and 10";
}
void MockParseTest () final
{
struct
{
CSphString m_sQuery;
CSphVector<SqlStmt_t> m_dStmt;
CSphSchema m_tSchema;
CSphString m_sError;
bool m_bSuccess = false;
} tParams;
tParams.m_sQuery = m_sQuery.cstr();
CSphColumnInfo tAttr;
tAttr.m_eAttrType = SPH_ATTR_BIGINT;
tAttr.m_sName = sphGetDocidName ();
tParams.m_tSchema.AddAttr ( tAttr, false );
Threads::MockCallCoroutine ( m_dMockStack, [&tParams] {
tParams.m_bSuccess = sphParseSqlQuery ( FromStr ( tParams.m_sQuery ), tParams.m_dStmt, tParams.m_sError, SPH_COLLATION_DEFAULT );
if ( !tParams.m_bSuccess )
return;
const CSphQuery & tQuery = tParams.m_dStmt[0].m_tQuery;
CreateFilterContext_t tFCtx;
tFCtx.m_pFilters = &tQuery.m_dFilters;
tFCtx.m_pFilterTree = &tQuery.m_dFilterTree;
tFCtx.m_pMatchSchema = &tParams.m_tSchema;
tFCtx.m_pIndexSchema = &tParams.m_tSchema;
tFCtx.m_bScan = true;
CSphString sWarning;
CSphQueryContext tCtx ( tQuery );
tParams.m_bSuccess = tCtx.CreateFilters ( tFCtx, tParams.m_sError, sWarning );
} );
if ( !tParams.m_bSuccess || !tParams.m_sError.IsEmpty () )
sphWarning ( "stack check filter error: %s", tParams.m_sError.cstr () );
}
protected:
StringBuilder_c m_sQuery;
public:
static std::pair<int, int> MockMeasure();
static void PublishValue ( std::pair<int, int> tStack );
};
/////////////////////////////////////////////////////////////////////
/// calculate stack for FT
#include "searchdaemon.h"
#include "sphinxsort.h"
#include "binlog.h"
class FullTextStackSize_c: public StackMeasurer_c
{
void BuildMockExpr ( int iComplexity ) final
{
m_sExpr.Clear();
m_sExpr << "(";
for ( int i = 0; i < iComplexity; ++i )
m_sExpr << "a ";
m_sExpr << "b \"a b\") | ( a -b )";
}
void MockParseTest() override
{
struct
{
CSphQuery tQuery;
CSphQueryResult tQueryResult;
CSphMultiQueryArgs tArgs { 1 };
SphQueueRes_t tRes;
ISphMatchSorter* pSorter;
} tParams;
AggrResult_t tResult;
tParams.tQueryResult.m_pMeta = &tResult;
tParams.tQuery.m_sQuery = m_sExpr.operator CSphString();
auto pParser = sphCreatePlainQueryParser();
tParams.tQuery.m_pQueryParser = pParser.get();
SphQueueSettings_t tQueueSettings ( m_pRtIndex->GetMatchSchema() );
tParams.pSorter = sphCreateQueue ( tQueueSettings, tParams.tQuery, tResult.m_sError, tParams.tRes );
Threads::MockCallCoroutine ( m_dMockStack, [this,&tParams] {
m_pRtIndex->MultiQuery ( tParams.tQueryResult, tParams.tQuery, { &tParams.pSorter, 1 }, tParams.tArgs );
} );
SafeDelete ( tParams.pSorter );
}
StringBuilder_c m_sExpr;
std::unique_ptr<RtIndex_i> m_pRtIndex;
public:
static std::pair<int,int> MockMeasure();
static void PublishValue ( std::pair<int, int> tStack );
FullTextStackSize_c()
{
CSphDictSettings tDictSettings;
auto pTok = Tokenizer::Detail::CreateUTF8Tokenizer();
CSphSchema tSrcSchema;
tSrcSchema.AddField ( "text" );
CSphColumnInfo tCol ( sphGetDocidName() );
tCol.m_eAttrType = SPH_ATTR_BIGINT;
tSrcSchema.AddAttr ( tCol, true );
CSphString sError;
DictRefPtr_c pDict { sphCreateDictionaryCRC ( tDictSettings, nullptr, pTok, "none", false, 32, nullptr, sError ) };
CSphSchema tSchema;
tSchema.AddField ( "text" );
tSchema.AddAttr ( tCol, false );
m_pRtIndex = sphCreateIndexRT ( "testrt", "fake", tSchema, 32 * 1024 * 1024, false );
m_pRtIndex->SetTokenizer ( pTok->Clone ( SPH_CLONE_INDEX ) );
m_pRtIndex->SetDictionary ( pDict->Clone() );
m_pRtIndex->ProhibitSave();
m_pRtIndex->PostSetup();
InsertDocData_c tDoc ( m_pRtIndex->GetMatchSchema() );
tDoc.SetID ( 1 );
tDoc.m_dFields[0] = { "a b", 3 };
auto& bRTChangesAllowed = RTChangesAllowed();
assert ( !bRTChangesAllowed ); // we expect to run at the very beginning, when changes are NOT yet allowed
bRTChangesAllowed = true; // we don't care about previous value
RtAccum_t tAcc;
CSphString sFilter, sWarning;
m_pRtIndex->AddDocument ( tDoc, false, sFilter, sError, sWarning, &tAcc );
bool bOldBinlog = Binlog::MockDisabled ( true );
{ // commit requires a coro context (hopefully 100K of frame per level fits any arch)
CSphFixedVector<BYTE> dSafeStack { 100 * 1024 };
Threads::MockCallCoroutine ( dSafeStack, [this, &tAcc] { // run in a coro, since a fat expression may already require a dedicated stack
m_pRtIndex->Commit ( nullptr, &tAcc );
} );
}
Binlog::MockDisabled ( bOldBinlog );
bRTChangesAllowed = false;
}
};
#if defined( __clang__ ) || defined( __GNUC__ )
#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__ ( ( no_sanitize_address ) )
#else
#define ATTRIBUTE_NO_SANITIZE_ADDRESS
#endif
ATTRIBUTE_NO_SANITIZE_ADDRESS std::pair<int, int> CreateExprStackSize_c::MockMeasure()
{
CreateExprStackSize_c tCreateMeter;
return tCreateMeter.MockMeasureStack ( 5 );
}
ATTRIBUTE_NO_SANITIZE_ADDRESS std::pair<int, int> EvalExprStackSize_c::MockMeasure()
{
EvalExprStackSize_c tEvalMeter;
return tEvalMeter.MockMeasureStack ( 20 );
}
ATTRIBUTE_NO_SANITIZE_ADDRESS std::pair<int, int> FilterCreationMeasureStack_c::MockMeasure()
{
FilterCreationMeasureStack_c tCreateMeter;
return tCreateMeter.MockMeasureStack ( 100 );
}
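// the fulltext match stack may show no measurable per-node delta on small
// queries, so retry with progressively larger ones until a delta appears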
ATTRIBUTE_NO_SANITIZE_ADDRESS std::pair<int, int> FullTextStackSize_c::MockMeasure()
{
FullTextStackSize_c tCreateMeter;
const int START = 128;
const int STEP = 64;
auto x = tCreateMeter.MockMeasureStack ( START );
for ( auto i=0; i<10; ++i )
{
if ( x.first )
return x;
x = tCreateMeter.MockMeasureStack ( START + STEP * i );
}
return x;
}
void CreateExprStackSize_c::PublishValue ( std::pair<int, int> iStack )
{
SetExprNodeStackItemSize ( iStack.first, 0 );
}
void EvalExprStackSize_c::PublishValue ( std::pair<int, int> iStack )
{
SetExprNodeStackItemSize ( 0, iStack.first );
}
void FilterCreationMeasureStack_c::PublishValue ( std::pair<int, int> iStack )
{
SetFilterStackItemSize ( iStack );
}
void FullTextStackSize_c::PublishValue ( std::pair<int, int> iStack )
{
SetExtNodeStackSize ( iStack.first, iStack.second );
}
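// resolves the frame size in priority order: the compiled-in FRAMEVAL wins
// unless it is zero or stack mocking is allowed; then the MANTICORE_<name>
// env var; then a live mock measurement. The start size is resolved the
// same way via INITVAL and MANTICORE_START_<name>.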
template<typename MOCK, int FRAMEVAL=0, int INITVAL=0>
ATTRIBUTE_NO_SANITIZE_ADDRESS void DetermineStackSize ( const char* szReport, const char* szEnv )
{
int iFrameSize = FRAMEVAL;
int iInitSize = INITVAL;
std::pair<int,int> tNewSize {0,0};
bool bMocked = false;
if ( !FRAMEVAL || Threads::StackMockingAllowed() )
{
StringBuilder_c sName;
sName << "MANTICORE_" << szEnv;
tNewSize.first = val_from_env ( sName.cstr(), 0 );
if ( !tNewSize.first )
{
tNewSize = MOCK::MockMeasure();
bMocked = true;
#ifdef NDEBUG
if ( FRAMEVAL && FRAMEVAL < tNewSize.first )
sphLogDebug ( "Compiled-in value %s (%d) is less than measured (%d).", szEnv, FRAMEVAL, tNewSize.first );
#endif
}
iFrameSize = tNewSize.first;
if ( bMocked )
sphLogDebug ( "Frame %s is %d (mocked, as no env MANTICORE_%s=%d found)", szReport, iFrameSize, szEnv, iFrameSize );
else
sphLogDebug ( "Frame %s %d (from env MANTICORE_%s)", szReport, iFrameSize, szEnv );
} else
{
sphLogDebug ( "Frame %s is %d (compiled-in)", szReport, iFrameSize );
}
if ( !INITVAL || Threads::StackMockingAllowed() )
{
StringBuilder_c sName;
sName << "MANTICORE_START_" << szEnv;
tNewSize.second = val_from_env ( sName.cstr(), tNewSize.second );
if ( !bMocked && !tNewSize.second )
{
tNewSize = MOCK::MockMeasure();
bMocked = true;
#ifdef NDEBUG
if ( INITVAL && INITVAL < tNewSize.second )
sphLogDebug ( "Compiled-in value start_%s (%d) is less than measured (%d).", szEnv, INITVAL, tNewSize.second );
#endif
}
iInitSize = tNewSize.second;
if ( bMocked )
sphLogDebug ( "Starting %s is %d (mocked, as no env MANTICORE_START_%s=%d found)", szReport, iInitSize, szEnv, iInitSize );
else
sphLogDebug ( "Starting %s %d (from env MANTICORE_START_%s)", szReport, iInitSize, szEnv );
} else
{
sphLogDebug ( "Starting %s is %d (compiled-in)", szReport, iInitSize );
}
MOCK::PublishValue ( tNewSize );
}
void DetermineNodeItemStackSize()
{
// some values for x86_64: clang 12.0.1 relwithdebinfo = 768, debug = 4208. gcc 9.3 relwithdebinfo = 16, debug = 256
#ifdef KNOWN_CREATE_SIZE
DetermineStackSize<CreateExprStackSize_c, KNOWN_CREATE_SIZE>
#else
DetermineStackSize<CreateExprStackSize_c>
#endif
( "expression stack for creation", "KNOWN_CREATE_SIZE" );
// some values for x86_64: clang 12.0.1 relwithdebinfo = 32, debug = 48. gcc 9.3 relwithdebinfo = 48, debug = 48
#ifdef KNOWN_EXPR_SIZE
DetermineStackSize<EvalExprStackSize_c, KNOWN_EXPR_SIZE>
#else
DetermineStackSize<EvalExprStackSize_c>
#endif
( "expression stack for eval/deletion", "KNOWN_EXPR_SIZE" );
}
void DetermineFilterItemStackSize ()
{
// some values for x86_64: clang 12.0.1 relwithdebinfo = 208, debug = 400. gcc 9.3 relwithdebinfo = 240, debug = 272
#ifdef KNOWN_FILTER_SIZE
DetermineStackSize<FilterCreationMeasureStack_c, KNOWN_FILTER_SIZE>
#else
DetermineStackSize<FilterCreationMeasureStack_c>
#endif
( "filter stack delta", "KNOWN_FILTER_SIZE" );
}
void DetermineMatchStackSize()
{
#ifdef KNOWN_MATCH_SIZE
#ifdef START_KNOWN_MATCH_SIZE
DetermineStackSize<FullTextStackSize_c, KNOWN_MATCH_SIZE, START_KNOWN_MATCH_SIZE>
#else
DetermineStackSize<FullTextStackSize_c, KNOWN_MATCH_SIZE>
#endif
#else
DetermineStackSize<FullTextStackSize_c, 0>
#endif
( "fulltext match stack delta", "KNOWN_MATCH_SIZE" );
}
| 15,896 | C++ | .cpp | 453 | 32.459161 | 132 | 0.711401 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,851 | searchdaemon.cpp | manticoresoftware_manticoresearch/src/searchdaemon.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
/// @file searchdaemon.cpp
/// Definitions for the stuff needed by searchd to work and serve the indexes.
#include "sphinxstd.h"
#include "searchdaemon.h"
#include "coroutine.h"
#include <optional>
#if _WIN32
#define USE_PSI_INTERFACE 1
// for MAC address
#include <iphlpapi.h>
#pragma message("Automatically linking with iphlpapi.lib")
#pragma comment(lib, "iphlpapi.lib")
#pragma comment(linker, "/defaultlib:WS2_32.Lib")
#pragma message("Automatically linking with WS2_32.Lib")
// socket function definitions
#pragma comment(linker, "/defaultlib:wsock32.lib")
#pragma message("Automatically linking with wsock32.lib")
#else
#include <netdb.h>
// for MAC address
#include <net/if.h>
#include <sys/ioctl.h>
#include <net/ethernet.h>
// TCP_NODELAY, TCP_FASTOPEN, etc.
#include <netinet/tcp.h>
#endif
// for FreeBSD
#if defined(__FreeBSD__)
#include <sys/sysctl.h>
#include <net/route.h>
#include <net/if_dl.h>
#include <netinet/in.h>
#endif
#include <cmath>
/////////////////////////////////////////////////////////////////////////////
// MISC GLOBALS
/////////////////////////////////////////////////////////////////////////////
const char * szCommand ( int eCmd)
{
const char* szCommands[SEARCHD_COMMAND_TOTAL] = {"command_search", "command_excerpt", "command_update",
"command_keywords", "command_persist", "command_status", "gap_6", "command_flushattrs", "command_sphinxql",
"command_ping", "command_delete", "command_set", "command_insert", "command_replace", "command_commit",
"command_suggest", "command_json", "command_callpq", "command_cluster", "command_getfield"};
if ( eCmd<SEARCHD_COMMAND_TOTAL )
return szCommands[eCmd];
return "***WRONG COMMAND!***";
}
// 'like' matcher
CheckLike::CheckLike( const char* sPattern )
{
if ( !sPattern )
return;
m_sPattern.Reserve( 2 * (int) strlen( sPattern ));
char* d = const_cast<char*> ( m_sPattern.cstr());
// remap from SQL LIKE syntax to Sphinx wildcards syntax
// '_' maps to '?', match any single char
// '%' maps to '*', match zero or more chars
for ( const char* s = sPattern; *s; ++s )
{
switch ( *s )
{
case '_': *d++ = '?';
break;
case '%': *d++ = '*';
break;
case '?': *d++ = '\\';
*d++ = '?';
break;
case '*': *d++ = '\\';
*d++ = '*';
break;
default: *d++ = *s;
break;
}
}
*d = '\0';
}
bool CheckLike::Match ( const char* sValue ) const noexcept
{
return sValue && ( m_sPattern.IsEmpty() || sphWildcardMatch ( sValue, m_sPattern.cstr() ) );
}
// string vector with 'like' matcher
/////////////////////////////////////////////////////////////////////////////
VectorLike::VectorLike( int iCols )
: CheckLike( nullptr )
{
m_dHeadNames.Resize ( iCols );
}
VectorLike::VectorLike ( const CSphString & sPattern )
: CheckLike ( sPattern.cstr () )
{
m_dHeadNames.Resize ( 2 );
SetColName ( "Variable_name" );
SetColName ( "Value", 1 );
}
VectorLike::VectorLike( const CSphString& sPattern, int iCols )
: CheckLike ( sPattern.cstr () )
{
m_dHeadNames.Resize ( iCols );
}
VectorLike::VectorLike ( const CSphString & sPattern, std::initializer_list<const char *> sCols )
: CheckLike ( sPattern.cstr () )
{
for ( const char * szCol : sCols )
m_dHeadNames.Add ( szCol );
}
void VectorLike::SetColNames ( std::initializer_list<const char *> sCols )
{
for ( const char * szCol : sCols )
m_dHeadNames.Add ( szCol );
}
void VectorLike::SetColName ( CSphString sValue, int iIdx )
{
assert ( iIdx>=0 && iIdx<m_dHeadNames.GetLength () );
m_dHeadNames[iIdx] = std::move(sValue);
}
const VecTraits_T<CSphString> & VectorLike::Header () const
{
return m_dHeadNames;
}
bool VectorLike::MatchAdd( const char* sValue )
{
assert ( m_dHeadNames.GetLength ()>=1 );
if ( Match( sValue ))
{
Add( sValue );
return true;
}
return false;
}
bool VectorLike::MatchAddf ( const char* sTemplate, ... )
{
assert ( m_dHeadNames.GetLength ()>=1 );
va_list ap;
CSphString sValue;
va_start ( ap, sTemplate );
sValue.SetSprintfVa( sTemplate, ap );
va_end ( ap );
return MatchAdd( sValue.cstr());
}
bool VectorLike::Matchf ( const char* sTemplate, ... ) const noexcept
{
assert ( m_dHeadNames.GetLength() >= 1 );
va_list ap;
CSphString sValue;
va_start ( ap, sTemplate );
sValue.SetSprintfVa ( sTemplate, ap );
va_end ( ap );
return Match ( sValue.cstr() );
}
void VectorLike::Addf ( const char * sValueTmpl, ... )
{
va_list ap;
StringBuilder_c sValue;
va_start ( ap, sValueTmpl );
sValue.vSprintf ( sValueTmpl, ap );
va_end ( ap );
Add ( sValue.cstr () );
}
void VectorLike::MatchTuplet ( const char * sKey, const char * sValue )
{
assert ( m_dHeadNames.GetLength ()>=2 );
if ( !Match ( sKey ) )
return;
Add ( sKey );
Add ( sValue );
FillTail ( 2 );
}
void VectorLike::MatchTupletf ( const char * sKey, const char * sValueTmpl, ... )
{
assert ( m_dHeadNames.GetLength ()>=2 );
if ( !Match ( sKey ) )
return;
va_list ap;
StringBuilder_c sValue;
va_start ( ap, sValueTmpl );
sValue.vSprintf( sValueTmpl, ap );
va_end ( ap );
Add ( sKey );
Add ( sValue.cstr() );
FillTail ( 2 );
}
void VectorLike::MatchTupletFn ( const char * sKey, Generator_fn && fnValuePrinter )
{
assert ( m_dHeadNames.GetLength ()>=2 );
if ( !Match ( sKey ) )
return;
Add ( sKey );
Add ( fnValuePrinter () );
FillTail ( 2 );
}
void VectorLike::MatchTupletFn ( const char * sKey, GeneratorS_fn && fnValuePrinter )
{
assert ( m_dHeadNames.GetLength ()>=2 );
if ( !Match ( sKey ) )
return;
Add ( sKey );
Add ( CSphString ( fnValuePrinter () ) );
FillTail ( 2 );
}
void VectorLike::FillTail ( int iHas )
{
for ( auto iLen = m_dHeadNames.GetLength (); iHas<iLen; ++iHas )
Add("");
}
const char* GetIndexTypeName ( IndexType_e eType )
{
switch ( eType )
{
case IndexType_e::PLAIN : return "plain";
case IndexType_e::TEMPLATE: return "template";
case IndexType_e::RT: return "rt";
case IndexType_e::PERCOLATE: return "percolate";
case IndexType_e::DISTR: return "distributed";
default: return "invalid";
}
}
IndexType_e TypeOfIndexConfig( const CSphString& sType )
{
if ( sType=="distributed" )
return IndexType_e::DISTR;
if ( sType=="rt" )
return IndexType_e::RT;
if ( sType=="percolate" )
return IndexType_e::PERCOLATE;
if ( sType=="template" )
return IndexType_e::TEMPLATE;
if (( sType.IsEmpty() || sType=="plain" ))
return IndexType_e::PLAIN;
return IndexType_e::ERROR_;
}
static void MaybeFatalLog ( CSphString * pFatal, const char * sTemplate, ... )
{
va_list ap;
va_start ( ap, sTemplate );
if ( pFatal )
pFatal->SetSprintfVa ( sTemplate, ap );
else
sphFatalVa ( sTemplate, ap );
va_end ( ap );
}
bool CheckPort ( int iPort, CSphString * pFatal )
{
if ( !IsPortInRange ( iPort ) )
{
MaybeFatalLog ( pFatal, "port %d is out of range", iPort );
return false;
}
return true;
}
// check only the bare proto name in lowercase; suffixes like '_vip' are handled by the caller
static Proto_e SimpleProtoByName ( const CSphString& sProto, CSphString * pFatal )
{
if ( sProto=="" )
return Proto_e::SPHINX;
if ( sProto=="mysql41" || sProto=="mysql" )
return Proto_e::MYSQL41;
if ( sProto=="http" )
return Proto_e::HTTP;
if ( sProto=="https" )
return Proto_e::HTTPS;
if ( sProto=="replication" )
return Proto_e::REPLICATION;
if ( sProto=="sphinx" )
return Proto_e::SPHINXSE;
MaybeFatalLog ( pFatal, "unknown listen protocol type '%s'", sProto.scstr());
return Proto_e::UNKNOWN;
}
static bool ProtoByName ( CSphString sFullProto, ListenerDesc_t & tDesc, CSphString * pFatal )
{
sFullProto.ToLower();
StrVec_t dParts;
sphSplit( dParts, sFullProto.cstr(), "_" );
if ( !dParts.IsEmpty() )
{
tDesc.m_eProto = SimpleProtoByName( dParts[0], pFatal );
if ( tDesc.m_eProto==Proto_e::UNKNOWN )
return false;
}
if ( dParts.GetLength() == 1 )
return true;
if ( dParts.GetLength() >= 2 )
{
bool bOk = dParts.GetLength() == 2;
if ( dParts[1] == "vip" )
tDesc.m_bVIP = true;
else if ( dParts[1] == "readonly" )
tDesc.m_bReadOnly = true;
else
bOk = false;
if ( bOk )
return true;
}
if ( dParts.GetLength() == 3 && dParts[2] == "readonly" )
{
tDesc.m_bReadOnly = true;
return true;
}
MaybeFatalLog ( pFatal, "unknown listen protocol type '%s'", sFullProto.scstr() );
return false;
}
/// listen = ( address ":" port | port | path | address ":" port start - port end ) [ ":" protocol ] [ "_vip" ]
ListenerDesc_t ParseResolveListener ( const char* sSpec, bool bResolve, CSphString* pFatal )
{
ListenerDesc_t tRes;
tRes.m_eProto = Proto_e::SPHINX;
tRes.m_uIP = htonl(INADDR_ANY);
tRes.m_iPort = SPHINXAPI_PORT;
tRes.m_iPortsCount = 0;
tRes.m_bVIP = false;
tRes.m_bReadOnly = false;
// split by colon
auto dParts = sphSplit( sSpec, ":" ); // diff. parts are :-separated
int iParts = dParts.GetLength();
if ( iParts>3 )
{
MaybeFatalLog ( pFatal, "invalid listen format (too many fields)" );
return {};
}
assert ( iParts>=1 && iParts<=3 );
// handle UNIX socket case
// might be either the name alone (1 part), or name+protocol (2 parts)
if ( *dParts[0].scstr()=='/' )
{
if ( iParts>2 )
{
MaybeFatalLog ( pFatal, "invalid listen format (too many fields)" );
return {};
}
if ( iParts==2 && !ProtoByName ( dParts[1], tRes, pFatal ) )
return {};
tRes.m_sUnix = dParts[0];
// MOVED!!! check outside ParseListener in order to keep tests consistent across platforms
#if _WIN32
MaybeFatalLog ( pFatal, "UNIX sockets are not supported on Windows" );
return {};
#else
return tRes;
#endif
}
// check if it all starts with a valid port number
auto sPart = dParts[0].cstr();
auto iLen = (int) strlen( sPart );
bool bAllDigits = true;
for ( int i = 0; i<iLen && bAllDigits; ++i )
if ( !isdigit( sPart[i] ))
bAllDigits = false;
int iPort = 0;
if ( bAllDigits && iLen<=5 ) // a short all-digit token can only be a port number, nothing else!
{
iPort = atol( sPart );
if ( !CheckPort ( iPort, pFatal ) ) // let's forbid ambiguous magic like 0:sphinx or 99999:mysql41
return {};
}
// handle TCP port case
// one part: might be either a port number or a host name (the unix socket case is already parsed above)
if ( iParts==1 )
{
if ( iPort )
{
// bare port number
tRes.m_iPort = iPort;
} else
{
// bare host name
tRes.m_sAddr = sSpec;
tRes.m_uIP = bResolve ? sphGetAddress ( sSpec, ( pFatal==nullptr ), false, pFatal ) : 0;
if ( pFatal && !pFatal->IsEmpty() )
return {};
}
return tRes;
}
// two or three parts
if ( iPort )
{
// 1st part is a valid port number; must be port:proto
if ( iParts!=2 )
{
MaybeFatalLog ( pFatal, "invalid listen format (expected port:proto, got extra trailing part in listen=%s)", sSpec );
return {};
}
tRes.m_iPort = iPort;
if ( !ProtoByName ( dParts[1], tRes, pFatal ) )
return {};
return tRes;
}
// 1st part must be a host name; must be: host:port[:proto]
if ( iParts==3 && !ProtoByName ( dParts[2], tRes, pFatal ) )
return {};
if ( dParts[0].IsEmpty() )
{
tRes.m_uIP = htonl(INADDR_ANY);
} else
{
tRes.m_sAddr = dParts[0];
tRes.m_uIP = bResolve ? sphGetAddress ( dParts[0].cstr(), ( pFatal==nullptr ), false, pFatal ) : 0;
if ( pFatal && !pFatal->IsEmpty() )
return {};
}
auto dPorts = sphSplit( dParts[1].scstr(), "-" );
tRes.m_iPort = atoi( dPorts[0].cstr());
if ( !CheckPort( tRes.m_iPort, pFatal ) )
return {};
if ( dPorts.GetLength()==2 )
{
int iPortsEnd = atoi( dPorts[1].scstr() );
if ( !CheckPort ( iPortsEnd, pFatal ) )
return {};
int iPortsCount = iPortsEnd - tRes.m_iPort + 1;
if ( iPortsEnd<=tRes.m_iPort )
{
MaybeFatalLog ( pFatal, "ports range invalid %d-%d", tRes.m_iPort, iPortsEnd );
return {};
}
if ( iPortsCount<2 )
{
MaybeFatalLog( pFatal, "ports range %d-%d count should be at least 2, got %d", tRes.m_iPort, iPortsEnd, iPortsCount );
return {};
}
tRes.m_iPortsCount = iPortsCount;
}
return tRes;
}
ListenerDesc_t ParseListener ( const char* sSpec, CSphString* pFatal )
{
return ParseResolveListener ( sSpec, true, pFatal );
}
/////////////////////////////////////////////////////////////////////////////
// NETWORK SOCKET WRAPPERS
/////////////////////////////////////////////////////////////////////////////
#if _WIN32
const char * sphSockError ( int iErr )
{
if ( iErr==0 )
iErr = WSAGetLastError ();
static char sBuf [ 256 ];
_snprintf ( sBuf, sizeof(sBuf), "WSA error %d", iErr );
return sBuf;
}
#else
const char* sphSockError( int )
{
return strerrorm(errno);
}
#endif
int sphSockGetErrno()
{
#if _WIN32
return WSAGetLastError();
#else
return errno;
#endif
}
void sphSockSetErrno( int iErr )
{
#if _WIN32
WSASetLastError ( iErr );
#else
errno = iErr;
#endif
}
int sphSockPeekErrno()
{
int iRes = sphSockGetErrno();
sphSockSetErrno( iRes );
return iRes;
}
int sphSetSockNB( int iSock )
{
#if _WIN32
u_long uMode = 1;
return ioctlsocket ( iSock, FIONBIO, &uMode );
#else
return fcntl( iSock, F_SETFL, O_NONBLOCK );
#endif
}
void sphSetSockNodelay ( int iSock )
{
#ifdef TCP_NODELAY
int iOn = 1;
if ( setsockopt ( iSock, IPPROTO_TCP, TCP_NODELAY, (char*)&iOn, sizeof ( iOn ) )<0 )
sphWarning ( "failed to set nodelay option: %s", sphSockError() );
#endif
}
void sphSetSockReuseAddr ( int iSock )
{
int iOn = 1;
if ( setsockopt ( iSock, SOL_SOCKET, SO_REUSEADDR, (char *) &iOn, sizeof ( iOn ) ) )
sphWarning ( "setsockopt(SO_REUSEADDR) failed: %s", sphSockError () );
}
void sphSetSockReusePort ( int iSock )
{
#if HAVE_SO_REUSEPORT
int iOn = 1;
if ( setsockopt ( iSock, SOL_SOCKET, SO_REUSEPORT, (char *) &iOn, sizeof ( iOn ) ) )
sphWarning ( "setsockopt(SO_REUSEPORT) failed: %s", sphSockError () );
#endif
}
void sphSetSockTFO ( int iSock)
{
#if defined (TCP_FASTOPEN)
int iOn = 1;
if ( setsockopt ( iSock, IPPROTO_TCP, TCP_FASTOPEN, (char *) &iOn, sizeof ( iOn ) ) )
sphLogDebug ( "setsockopt(TCP_FASTOPEN) failed: %s", sphSockError () );
#endif
}
#if _WIN32
/// on Windows, the wrapper just prevents the warnings
#pragma warning(push) // store current warning values
#pragma warning(disable:4127) // conditional expr is const
#pragma warning(disable:4389) // signed/unsigned mismatch
static void FDSet ( int fd, fd_set * fdset )
{
FD_SET ( fd, fdset );
}
#pragma warning(pop) // restore warnings
#else // !_WIN32
#if !HAVE_POLL
#define SPH_FDSET_OVERFLOW( _fd ) ( (_fd)<0 || (_fd)>=(int)FD_SETSIZE )
/// on UNIX, we also check that the descriptor won't corrupt the stack
static void FDSet( int fd, fd_set* set )
{
if ( SPH_FDSET_OVERFLOW( fd ))
sphFatal( "FDSet() failed fd=%d, FD_SETSIZE=%d", fd, FD_SETSIZE );
else
FD_SET ( fd, set );
}
#endif // !HAVE_POLL
#endif // _WIN32
/// wait until socket is readable or writable
int sphPoll( int iSock, int64_t tmTimeout, bool bWrite )
{
// don't need any epoll/kqueue here, since we check only 1 socket
#if HAVE_POLL
struct pollfd pfd;
pfd.fd = iSock;
pfd.events = bWrite ? POLLOUT : POLLIN;
return ::poll( &pfd, 1, int( tmTimeout / 1000 ));
#else
fd_set fdSet;
FD_ZERO ( &fdSet );
FDSet ( iSock, &fdSet );
struct timeval tv;
tv.tv_sec = (int)( tmTimeout / 1000000 );
tv.tv_usec = (int)( tmTimeout % 1000000 );
return ::select ( iSock+1, bWrite ? NULL : &fdSet, bWrite ? &fdSet : NULL, NULL, &tv );
#endif
}
static bool IsLocalhost ( DWORD uAddr )
{
return ( ( ntohl ( uAddr )>>24)==127 );
}
DWORD sphGetAddress ( const char * sHost, bool bFatal, bool bIP, CSphString * pFatal )
{
struct addrinfo tHints, * pResult = nullptr;
memset( &tHints, 0, sizeof( tHints ));
tHints.ai_family = AF_INET;
tHints.ai_socktype = SOCK_STREAM;
if ( bIP )
tHints.ai_flags = AI_NUMERICHOST;
int iResult = getaddrinfo( sHost, nullptr, &tHints, &pResult );
auto pResFree = AtScopeExit ( [pResult] { if (pResult) freeaddrinfo( pResult ); } );
if ( iResult!=0 || !pResult )
{
if ( pFatal )
pFatal->SetSprintf ( "no AF_INET address found for: %s, error %d: %s", sHost, iResult, gai_strerror(iResult) );
else if ( bFatal )
sphFatal( "no AF_INET address found for: %s, error %d: %s", sHost, iResult, gai_strerror(iResult) );
else
sphLogDebugv( "no AF_INET address found for: %s, error %d: %s", sHost, iResult, gai_strerror(iResult) );
return 0;
}
assert ( pResult );
auto * pSockaddr_ipv4 = ( struct sockaddr_in* ) pResult->ai_addr;
DWORD uAddr = pSockaddr_ipv4->sin_addr.s_addr;
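// the host resolved to several addresses: list all of them in the warning,
// and if the first one is loopback, prefer a non-loopback candidate instead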
if ( pResult->ai_next )
{
const bool bLocalHost = IsLocalhost ( uAddr );
std::array<char, SPH_ADDRESS_SIZE + 1> sAddrBuf{};
StringBuilder_c sBuf( "; ip=", "ip=" );
while ( pResult )
{
auto * pAddr = ( struct sockaddr_in *)pResult->ai_addr;
DWORD uNextAddr = pAddr->sin_addr.s_addr;
sphFormatIP( sAddrBuf.data(), sAddrBuf.size(), uNextAddr );
sBuf += sAddrBuf.data(); // cannot use << here: the builder would append the buffer with its trailing '\0', hiding subsequent chunks
pResult = pResult->ai_next;
if ( bLocalHost && !IsLocalhost ( uNextAddr ) )
uAddr = uNextAddr;
}
sphFormatIP( sAddrBuf.data(), sAddrBuf.size(), uAddr );
sphWarning( "multiple addresses (%s) found for '%s', using first one (%s)", sBuf.cstr(), sHost, sAddrBuf.data() );
}
return uAddr;
}
/// formats IP address given in network byte order into sBuffer
/// returns the buffer
char* sphFormatIP( char* sBuffer, int iBufferSize, DWORD uAddress )
{
const BYTE* a = ( const BYTE* ) &uAddress;
snprintf( sBuffer, iBufferSize, "%u.%u.%u.%u", a[0], a[1], a[2], a[3] );
return sBuffer;
}
bool IsPortInRange( int iPort )
{
return ( iPort>0 ) && ( iPort<=0xFFFF );
}
/////////////////////////////////////////////////////////////////////////////
// NETWORK BUFFERS
/////////////////////////////////////////////////////////////////////////////
ISphOutputBuffer::ISphOutputBuffer()
{
m_dBuf.Reserve( NETOUTBUF );
}
// construct via adopting external buf
ISphOutputBuffer::ISphOutputBuffer( CSphVector<BYTE>& dChunk )
{
m_dBuf.SwapData( dChunk );
}
void ISphOutputBuffer::SendString( const char* sStr )
{
int iLen = sStr ? (int) strlen( sStr ) : 0;
SendInt( iLen );
SendBytes( sStr, iLen );
}
void ISphOutputBuffer::SendString ( const Str_t& sStr )
{
SendInt ( sStr.second );
SendBytes ( sStr );
}
void SendString ( const Str_t& sStr );
/// SmartOutputBuffer_t : chain of blobs could be used in scattered sending
/////////////////////////////////////////////////////////////////////////////
SmartOutputBuffer_t::~SmartOutputBuffer_t()
{
m_dChunks.Apply( []( ISphOutputBuffer*& pChunk ) {
SafeDelete ( pChunk );
} );
}
void SmartOutputBuffer_t::StartNewChunk()
{
m_dChunks.Add( new ISphOutputBuffer( m_dBuf ));
m_dBuf.Reserve( NETOUTBUF );
}
/*
void SmartOutputBuffer_t::AppendBuf ( SmartOutputBuffer_t &dBuf )
{
if ( !dBuf.m_dBuf.IsEmpty () )
dBuf.StartNewChunk ();
for ( auto * pChunk : dBuf.m_dChunks )
{
pChunk->AddRef ();
m_dChunks.Add ( pChunk );
}
}
void SmartOutputBuffer_t::PrependBuf ( SmartOutputBuffer_t &dBuf )
{
CSphVector<ISphOutputBuffer *> dChunks;
if ( !dBuf.m_dBuf.IsEmpty () )
dBuf.StartNewChunk ();
for ( auto * pChunk : dBuf.m_dChunks )
{
pChunk->AddRef ();
dChunks.Add ( pChunk );
}
dChunks.Append ( m_dChunks );
m_dChunks.SwapData ( dChunks );
}
*/
#ifndef UIO_MAXIOV
#define UIO_MAXIOV (1024)
#endif
// makes a vector of chunks suitable for direct use in Send() or WSASend()
// returns the combined size of the chunks
size_t SmartOutputBuffer_t::GetIOVec( CSphVector<sphIovec>& dOut ) const
{
size_t iOutSize = 0;
dOut.Reset();
m_dChunks.Apply( [ &dOut, &iOutSize ]( const ISphOutputBuffer* pChunk ) {
auto& dIovec = dOut.Add();
IOPTR( dIovec ) = IOBUFTYPE ( pChunk->GetBufPtr());
IOLEN ( dIovec ) = pChunk->GetSentCount();
iOutSize += IOLEN ( dIovec );
} );
if ( !m_dBuf.IsEmpty())
{
auto& dIovec = dOut.Add();
IOPTR ( dIovec ) = IOBUFTYPE ( GetBufPtr());
IOLEN ( dIovec ) = (int) m_dBuf.GetLengthBytes();
iOutSize += IOLEN ( dIovec );
}
assert ( dOut.GetLength()<UIO_MAXIOV );
return iOutSize;
};
void SmartOutputBuffer_t::Reset()
{
m_dChunks.Apply( []( ISphOutputBuffer*& pChunk ) {
SafeDelete ( pChunk );
} );
m_dChunks.Reset();
m_dBuf.Reset();
m_dBuf.Reserve( NETOUTBUF );
};
#if _WIN32
void SmartOutputBuffer_t::LeakTo ( CSphVector<ISphOutputBuffer *> dOut )
{
for ( auto & pChunk : m_dChunks )
dOut.Add ( pChunk );
m_dChunks.Reset ();
dOut.Add ( new ISphOutputBuffer ( m_dBuf ) );
m_dBuf.Reserve ( NETOUTBUF );
}
#endif
/////////////////////////////////////////////////////////////////////////////
InputBuffer_c::InputBuffer_c( const BYTE* pBuf, int iLen )
: m_pBuf( pBuf ), m_pCur( pBuf ), m_iLen( iLen )
{
if ( !pBuf || iLen<0 )
SetError ( "empty input buffer" );
}
InputBuffer_c::InputBuffer_c ( const VecTraits_T<BYTE> & dBuf )
: m_pBuf ( dBuf.begin() ), m_pCur ( dBuf.begin () ), m_iLen ( dBuf.GetLength() )
{
if ( dBuf.IsEmpty() )
SetError ( "empty input buffer" );
}
CSphString InputBuffer_c::GetString()
{
int iLen = GetInt();
return GetRawString ( iLen );
}
CSphString InputBuffer_c::GetRawString( int iLen )
{
CSphString sRes;
if ( m_bError || !IsDataSizeValid ( iLen ) )
return sRes;
if ( iLen )
sRes.SetBinary( (const char*) m_pCur, iLen );
m_pCur += iLen;
return sRes;
}
bool InputBuffer_c::GetString( CSphVector<BYTE>& dBuffer )
{
int iLen = GetInt();
if ( m_bError || !IsDataSizeValid ( iLen ) )
return false;
if ( !iLen )
return true;
return GetBytes( dBuffer.AddN( iLen ), iLen );
}
bool InputBuffer_c::GetBytes( void* pBuf, int iLen )
{
assert ( pBuf );
if ( m_bError || !IsDataSizeValid ( iLen ) )
return false;
memcpy( pBuf, m_pCur, iLen );
m_pCur += iLen;
return true;
}
bool InputBuffer_c::GetBytesZerocopy( const BYTE** ppData, int iLen )
{
assert ( ppData );
if ( m_bError || !IsDataSizeValid ( iLen ) )
return false;
*ppData = m_pCur;
m_pCur += iLen;
return true;
}
bool InputBuffer_c::GetDwords( CSphVector<DWORD>& dBuffer, int& iGot, int iMax )
{
iGot = GetInt();
if ( iGot<0 || iGot>iMax )
{
SetError( "length %d (should be in 0..%d range)", iGot, iMax );
return false;
}
dBuffer.Resize( iGot );
ARRAY_FOREACH ( i, dBuffer )
dBuffer[i] = GetDword();
if ( m_bError )
dBuffer.Reset();
return !m_bError;
}
bool InputBuffer_c::GetQwords( CSphVector<SphAttr_t>& dBuffer, int& iGot, int iMax )
{
iGot = GetInt();
if ( iGot<0 || iGot>iMax )
{
SetError( "length %d (should be in 0..%d range)", iGot, iMax );
return false;
}
dBuffer.Resize( iGot );
ARRAY_FOREACH ( i, dBuffer )
dBuffer[i] = GetUint64();
if ( m_bError )
dBuffer.Reset();
return !m_bError;
}
void InputBuffer_c::SetError( const char * sTemplate, ... )
{
m_bError = true;
va_list ap;
va_start ( ap, sTemplate );
m_sError.SetSprintfVa ( sTemplate, ap );
va_end ( ap );
}
bool InputBuffer_c::IsDataSizeValid ( int iSize )
{
if ( !IsLessMaxPacket ( iSize ) )
{
return false;
} else if ( m_pCur + iSize>m_pBuf + m_iLen )
{
SetError( "read overflows buffer by %d byte, data size %d", (int)( ( m_pCur + iSize ) - ( m_pBuf + m_iLen ) ), iSize );
return false;
}
return true;
}
bool InputBuffer_c::IsLessMaxPacket ( int iSize )
{
if ( iSize<0 )
{
SetError( "negative data length %d", iSize );
return false;
} else if ( iSize>m_iMaxPacketSize )
{
SetError( "length out of bounds %d(%d)", iSize, m_iMaxPacketSize );
return false;
}
return true;
}
void InputBuffer_c::ResetError()
{
m_bError = false;
m_sError = "";
}
void GenericOutputBuffer_c::ResetError()
{
m_bError = false;
m_sError = "";
}
void InputBuffer_c::SetMaxPacketSize( int iMaxPacketSize )
{
m_iMaxPacketSize = Max ( g_iMaxPacketSize, iMaxPacketSize );
}
/////////////////////////////////////////////////////////////////////////////
// SERVED INDEX DESCRIPTORS STUFF
/////////////////////////////////////////////////////////////////////////////
class QueryStatContainer_c: public QueryStatContainer_i
{
public:
void Add( uint64_t uFoundRows, uint64_t uQueryTime, uint64_t uTimestamp ) final;
QueryStatRecord_t GetRecord( int iRecord ) const noexcept final;
int GetNumRecords() const final;
QueryStatContainer_c();
QueryStatContainer_c( QueryStatContainer_c&& tOther ) noexcept;
void Swap( QueryStatContainer_c& rhs ) noexcept;
QueryStatContainer_c& operator=( QueryStatContainer_c tOther ) noexcept;
private:
CircularBuffer_T<QueryStatRecord_t> m_dRecords;
};
std::unique_ptr<QueryStatContainer_i> MakeStatsContainer ()
{
return std::make_unique<QueryStatContainer_c>();
}
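// incoming stats are bucketed: a query landing within BUCKET_TIME_DELTA
// (100 ms) of the last bucket is merged into it; buckets older than
// MAX_TIME_DELTA (15 min) are evicted before a new bucket is pushed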
void QueryStatContainer_c::Add( uint64_t uFoundRows, uint64_t uQueryTime, uint64_t uTimestamp )
{
if ( !m_dRecords.IsEmpty())
{
QueryStatRecord_t& tLast = m_dRecords.Last();
const uint64_t BUCKET_TIME_DELTA = 100000;
if ( uTimestamp - tLast.m_uTimestamp<=BUCKET_TIME_DELTA )
{
tLast.m_uFoundRowsMin = Min( uFoundRows, tLast.m_uFoundRowsMin );
tLast.m_uFoundRowsMax = Max( uFoundRows, tLast.m_uFoundRowsMax );
tLast.m_uFoundRowsSum += uFoundRows;
tLast.m_uQueryTimeMin = Min( uQueryTime, tLast.m_uQueryTimeMin );
tLast.m_uQueryTimeMax = Max( uQueryTime, tLast.m_uQueryTimeMax );
tLast.m_uQueryTimeSum += uQueryTime;
tLast.m_iCount++;
return;
}
}
const uint64_t MAX_TIME_DELTA = 15 * 60 * 1000000;
while ( !m_dRecords.IsEmpty() && ( uTimestamp - m_dRecords[0].m_uTimestamp )>MAX_TIME_DELTA )
m_dRecords.Pop();
QueryStatRecord_t& tRecord = m_dRecords.Push();
tRecord.m_uFoundRowsMin = uFoundRows;
tRecord.m_uFoundRowsMax = uFoundRows;
tRecord.m_uFoundRowsSum = uFoundRows;
tRecord.m_uQueryTimeMin = uQueryTime;
tRecord.m_uQueryTimeMax = uQueryTime;
tRecord.m_uQueryTimeSum = uQueryTime;
tRecord.m_uTimestamp = uTimestamp;
tRecord.m_iCount = 1;
}
QueryStatRecord_t QueryStatContainer_c::GetRecord ( int iRecord ) const noexcept
{
return m_dRecords[iRecord];
}
int QueryStatContainer_c::GetNumRecords() const
{
return m_dRecords.GetLength();
}
QueryStatContainer_c::QueryStatContainer_c() = default;
QueryStatContainer_c::QueryStatContainer_c( QueryStatContainer_c&& tOther ) noexcept
: QueryStatContainer_c()
{ Swap( tOther ); }
void QueryStatContainer_c::Swap( QueryStatContainer_c& rhs ) noexcept
{
rhs.m_dRecords.Swap( m_dRecords );
}
QueryStatContainer_c& QueryStatContainer_c::operator=( QueryStatContainer_c tOther ) noexcept
{
Swap( tOther );
return *this;
}
//////////////////////////////////////////////////////////////////////////
#ifndef NDEBUG
class QueryStatContainerExact_c: public QueryStatContainer_i
{
public:
void Add( uint64_t uFoundRows, uint64_t uQueryTime, uint64_t uTimestamp ) final;
QueryStatRecord_t GetRecord( int iRecord ) const noexcept final;
int GetNumRecords() const final;
QueryStatContainerExact_c();
QueryStatContainerExact_c( QueryStatContainerExact_c&& tOther ) noexcept;
void Swap( QueryStatContainerExact_c& rhs ) noexcept;
QueryStatContainerExact_c& operator=( QueryStatContainerExact_c tOther ) noexcept;
private:
struct QueryStatRecordExact_t
{
uint64_t m_uQueryTime;
uint64_t m_uFoundRows;
uint64_t m_uTimestamp;
};
CircularBuffer_T<QueryStatRecordExact_t> m_dRecords;
};
void QueryStatContainerExact_c::Add( uint64_t uFoundRows, uint64_t uQueryTime, uint64_t uTimestamp )
{
const uint64_t MAX_TIME_DELTA = 15 * 60 * 1000000;
while ( !m_dRecords.IsEmpty() && ( uTimestamp - m_dRecords[0].m_uTimestamp )>MAX_TIME_DELTA )
m_dRecords.Pop();
QueryStatRecordExact_t& tRecord = m_dRecords.Push();
tRecord.m_uFoundRows = uFoundRows;
tRecord.m_uQueryTime = uQueryTime;
tRecord.m_uTimestamp = uTimestamp;
}
int QueryStatContainerExact_c::GetNumRecords() const
{
return m_dRecords.GetLength();
}
QueryStatRecord_t QueryStatContainerExact_c::GetRecord ( int iRecord ) const noexcept
{
QueryStatRecord_t tRecord;
const QueryStatRecordExact_t& tExact = m_dRecords[iRecord];
tRecord.m_uQueryTimeMin = tExact.m_uQueryTime;
tRecord.m_uQueryTimeMax = tExact.m_uQueryTime;
tRecord.m_uQueryTimeSum = tExact.m_uQueryTime;
tRecord.m_uFoundRowsMin = tExact.m_uFoundRows;
tRecord.m_uFoundRowsMax = tExact.m_uFoundRows;
tRecord.m_uFoundRowsSum = tExact.m_uFoundRows;
tRecord.m_uTimestamp = tExact.m_uTimestamp;
tRecord.m_iCount = 1;
return tRecord;
}
QueryStatContainerExact_c::QueryStatContainerExact_c() = default;
QueryStatContainerExact_c::QueryStatContainerExact_c( QueryStatContainerExact_c&& tOther ) noexcept
: QueryStatContainerExact_c()
{ Swap( tOther ); }
void QueryStatContainerExact_c::Swap( QueryStatContainerExact_c& rhs ) noexcept
{
rhs.m_dRecords.Swap( m_dRecords );
}
QueryStatContainerExact_c& QueryStatContainerExact_c::operator=( QueryStatContainerExact_c tOther ) noexcept
{
Swap( tOther );
return *this;
}
#endif
//////////////////////////////////////////////////////////////////////////
ServedStats_c::ServedStats_c()
: m_pQueryStatRecords { std::make_unique<QueryStatContainer_c>() }
#ifndef NDEBUG
, m_pQueryStatRecordsExact { std::make_unique<QueryStatContainerExact_c>() }
#endif
{}
void ServedStats_c::AddQueryStat( uint64_t uFoundRows, uint64_t uQueryTime )
{
ScWL_t wLock( m_tStatsLock );
m_tRowsFoundDigest.Add(( double ) uFoundRows );
m_tQueryTimeDigest.Add(( double ) uQueryTime );
uint64_t uTimeStamp = sphMicroTimer();
m_pQueryStatRecords->Add( uFoundRows, uQueryTime, uTimeStamp );
#ifndef NDEBUG
m_pQueryStatRecordsExact->Add( uFoundRows, uQueryTime, uTimeStamp );
#endif
m_uTotalFoundRowsMin = Min( uFoundRows, m_uTotalFoundRowsMin );
m_uTotalFoundRowsMax = Max( uFoundRows, m_uTotalFoundRowsMax );
m_uTotalFoundRowsSum += uFoundRows;
m_uTotalQueryTimeMin = Min( uQueryTime, m_uTotalQueryTimeMin );
m_uTotalQueryTimeMax = Max( uQueryTime, m_uTotalQueryTimeMax );
m_uTotalQueryTimeSum += uQueryTime;
++m_uTotalQueries;
}
static const uint64_t g_dStatsIntervals[] =
{
1 * 60 * 1000000,
5 * 60 * 1000000,
15 * 60 * 1000000
};
void ServedStats_c::CalculateQueryStats( QueryStats_t& tRowsFoundStats, QueryStats_t& tQueryTimeStats ) const
{
ScRL_t rLock { m_tStatsLock };
DoStatCalcStats ( m_pQueryStatRecords.get(), tRowsFoundStats, tQueryTimeStats );
}
#ifndef NDEBUG
void ServedStats_c::CalculateQueryStatsExact( QueryStats_t& tRowsFoundStats, QueryStats_t& tQueryTimeStats ) const
{
ScRL_t rLock { m_tStatsLock };
DoStatCalcStats ( m_pQueryStatRecordsExact.get(), tRowsFoundStats, tQueryTimeStats );
}
#endif // !NDEBUG
static void CalcStatsForInterval( const QueryStatContainer_i* pContainer, QueryStatElement_t& tRowResult, QueryStatElement_t& tTimeResult, uint64_t uTimestamp, uint64_t uInterval, int iRecords )
{
assert ( pContainer );
using namespace QueryStats;
tRowResult.m_dData[TYPE_AVG] = 0;
tRowResult.m_dData[TYPE_MIN] = UINT64_MAX;
tRowResult.m_dData[TYPE_MAX] = 0;
tTimeResult.m_dData[TYPE_AVG] = 0;
tTimeResult.m_dData[TYPE_MIN] = UINT64_MAX;
tTimeResult.m_dData[TYPE_MAX] = 0;
CSphTightVector<uint64_t> dFound, dTime;
dFound.Reserve( iRecords );
dTime.Reserve( iRecords );
DWORD uTotalQueries = 0;
for ( int i = 0; i<pContainer->GetNumRecords(); ++i )
{
auto tRecord = pContainer->GetRecord ( i );
if ( uTimestamp - tRecord.m_uTimestamp<=uInterval )
{
tRowResult.m_dData[TYPE_MIN] = Min( tRecord.m_uFoundRowsMin, tRowResult.m_dData[TYPE_MIN] );
tRowResult.m_dData[TYPE_MAX] = Max( tRecord.m_uFoundRowsMax, tRowResult.m_dData[TYPE_MAX] );
tTimeResult.m_dData[TYPE_MIN] = Min( tRecord.m_uQueryTimeMin, tTimeResult.m_dData[TYPE_MIN] );
tTimeResult.m_dData[TYPE_MAX] = Max( tRecord.m_uQueryTimeMax, tTimeResult.m_dData[TYPE_MAX] );
dFound.Add( tRecord.m_uFoundRowsSum / tRecord.m_iCount );
dTime.Add( tRecord.m_uQueryTimeSum / tRecord.m_iCount );
tRowResult.m_dData[TYPE_AVG] += tRecord.m_uFoundRowsSum;
tTimeResult.m_dData[TYPE_AVG] += tRecord.m_uQueryTimeSum;
uTotalQueries += tRecord.m_iCount;
}
}
dFound.Sort();
dTime.Sort();
tRowResult.m_uTotalQueries = uTotalQueries;
tTimeResult.m_uTotalQueries = uTotalQueries;
if ( !dFound.GetLength())
return;
tRowResult.m_dData[TYPE_AVG] /= uTotalQueries;
tTimeResult.m_dData[TYPE_AVG] /= uTotalQueries;
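// nearest-rank percentiles: take ceil(N*p), round, convert to a zero-based
// index, and clamp it to [0, N-1]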
int u95 = Max( 0, Min( int( ceilf( dFound.GetLength() * 0.95f ) + 0.5f ) - 1, dFound.GetLength() - 1 ));
int u99 = Max( 0, Min( int( ceilf( dFound.GetLength() * 0.99f ) + 0.5f ) - 1, dFound.GetLength() - 1 ));
tRowResult.m_dData[TYPE_95] = dFound[u95];
tRowResult.m_dData[TYPE_99] = dFound[u99];
tTimeResult.m_dData[TYPE_95] = dTime[u95];
tTimeResult.m_dData[TYPE_99] = dTime[u99];
}
void CalcSimpleStats ( const QueryStatContainer_i * pContainer, QueryStats_t & tRowsFoundStats, QueryStats_t & tQueryTimeStats )
{
assert ( pContainer );
using namespace QueryStats;
auto uTimestamp = sphMicroTimer ();
int iRecords = pContainer->GetNumRecords ();
for ( int i = INTERVAL_1MIN; i<=INTERVAL_15MIN; ++i )
CalcStatsForInterval ( pContainer, tRowsFoundStats.m_dStats[i], tQueryTimeStats.m_dStats[i], uTimestamp, g_dStatsIntervals[i], iRecords );
}
void ServedStats_c::DoStatCalcStats( const QueryStatContainer_i* pContainer, QueryStats_t& tRowsFoundStats, QueryStats_t& tQueryTimeStats ) const
{
assert ( pContainer );
using namespace QueryStats;
auto uTimestamp = sphMicroTimer();
int iRecords = m_pQueryStatRecords->GetNumRecords();
for ( int i = INTERVAL_1MIN; i<=INTERVAL_15MIN; ++i )
CalcStatsForInterval( pContainer, tRowsFoundStats.m_dStats[i], tQueryTimeStats.m_dStats[i], uTimestamp, g_dStatsIntervals[i], iRecords );
auto& tRowsAllStats = tRowsFoundStats.m_dStats[INTERVAL_ALLTIME];
tRowsAllStats.m_dData[TYPE_AVG] = m_uTotalQueries ? m_uTotalFoundRowsSum / m_uTotalQueries : 0;
tRowsAllStats.m_dData[TYPE_MIN] = m_uTotalFoundRowsMin;
tRowsAllStats.m_dData[TYPE_MAX] = m_uTotalFoundRowsMax;
tRowsAllStats.m_dData[TYPE_95] = ( uint64_t ) m_tRowsFoundDigest.Percentile( 95 );
tRowsAllStats.m_dData[TYPE_99] = ( uint64_t ) m_tRowsFoundDigest.Percentile( 99 );
tRowsAllStats.m_uTotalQueries = m_uTotalQueries;
auto& tQueryAllStats = tQueryTimeStats.m_dStats[INTERVAL_ALLTIME];
tQueryAllStats.m_dData[TYPE_AVG] = m_uTotalQueries ? m_uTotalQueryTimeSum / m_uTotalQueries : 0;
tQueryAllStats.m_dData[TYPE_MIN] = m_uTotalQueryTimeMin;
tQueryAllStats.m_dData[TYPE_MAX] = m_uTotalQueryTimeMax;
tQueryAllStats.m_dData[TYPE_95] = ( uint64_t ) m_tQueryTimeDigest.Percentile( 95 );
tQueryAllStats.m_dData[TYPE_99] = ( uint64_t ) m_tQueryTimeDigest.Percentile( 99 );
tQueryAllStats.m_uTotalQueries = m_uTotalQueries;
}
//////////////////////////////////////////////////////////////////////////
RunningIndex_c::~RunningIndex_c()
{
if ( m_bLeaked )
auto VARIABLE_IS_NOT_USED p = m_pIndex.release();
if ( m_pIndex )
m_pIndex->Dealloc();
if ( !m_sUnlink.IsEmpty() )
{
sphLogDebug ( "unlink %s", m_sUnlink.cstr() );
sphUnlinkIndex ( m_sUnlink.cstr(), false );
}
}
void ServedIndex_c::UpdateMass () const NO_THREAD_SAFETY_ANALYSIS
{
CSphIndexStatus tStatus;
m_pIndex->m_pIndex->GetStatus ( &tStatus );
// break const, since mass value is not critical for races
m_iMass = (int) CalculateMass ( tStatus );
}
// Get index mass
uint64_t ServedIndex_c::GetIndexMass ( const ServedIndex_c* pServed )
{
return pServed ? pServed->m_iMass : 0;
}
void ServedIndex_c::SetIdx ( std::unique_ptr<CSphIndex>&& pIndex ) NO_THREAD_SAFETY_ANALYSIS
{
assert ( !m_pIndex );
m_pIndex = new RunningIndex_c;
m_pIndex->m_pIndex = std::move ( pIndex );
if ( !m_pStats )
m_pStats = new ServedStats_c;
}
void ServedIndex_c::ReleaseIdx() const NO_THREAD_SAFETY_ANALYSIS
{
if ( m_pIndex )
m_pIndex->m_bLeaked = true;
}
void ServedIndex_c::SetIdxAndStatsFrom ( const ServedIndex_c& tIndex ) NO_THREAD_SAFETY_ANALYSIS
{
m_pIndex = tIndex.m_pIndex;
m_pStats = tIndex.m_pStats;
}
void ServedIndex_c::SetStatsFrom ( const ServedIndex_c& tIndex ) NO_THREAD_SAFETY_ANALYSIS
{
m_pStats = tIndex.m_pStats;
}
void ServedIndex_c::SetUnlink ( CSphString sUnlink ) const
{
if ( m_pIndex )
m_pIndex->m_sUnlink = std::move ( sUnlink );
}
void LightClone ( ServedIndexRefPtr_c& pTarget, const cServedIndexRefPtr_c& pSource )
{
assert ( pTarget );
assert ( pSource );
auto& tDesc = (ServedDesc_t&)*pTarget;
tDesc = *pSource;
}
void FullClone ( ServedIndexRefPtr_c& pTarget, const cServedIndexRefPtr_c& pSource )
{
LightClone ( pTarget, pSource );
pTarget->SetIdxAndStatsFrom ( *pSource );
}
//////////////////////////////////////////////////////////////////////////
ServedIndexRefPtr_c& ServedClone_c::LightCloneOnce()
{
if ( !m_pTarget )
m_pTarget = MakeLightClone ( m_pSource );
return m_pTarget;
}
ServedIndexRefPtr_c& ServedClone_c::FullCloneOnce()
{
if ( !m_pTarget )
m_pTarget = MakeFullClone ( m_pSource );
return m_pTarget;
}
HashedServedClone_c::HashedServedClone_c ( CSphString sIndex, ReadOnlyServedHash_c* pHash )
: ServedClone_c { pHash->Get ( sIndex ) }
, m_sIndex { std::move ( sIndex ) }
, m_pHash { pHash }
{}
HashedServedClone_c::~HashedServedClone_c()
{
if ( !CloneRef() )
return;
m_pHash->Replace ( CloneRef(), m_sIndex );
}
//////////////////////////////////////////////////////////////////////////
ServedIndexRefPtr_c MakeServedIndex()
{
return ServedIndexRefPtr_c { new ServedIndex_c };
}
ServedIndexRefPtr_c MakeLightClone( const cServedIndexRefPtr_c& pSource )
{
auto pRes = MakeServedIndex();
LightClone ( pRes, pSource );
return pRes;
}
ServedIndexRefPtr_c MakeFullClone ( const cServedIndexRefPtr_c& pSource )
{
auto pRes = MakeServedIndex();
FullClone ( pRes, pSource );
return pRes;
}
//////////////////////////////////////////////////////////////////////////
CSphString GetMacAddress()
{
StringBuilder_c sMAC( ":" );
#if _WIN32
CSphFixedVector<IP_ADAPTER_ADDRESSES> dAdapters ( 128 );
PIP_ADAPTER_ADDRESSES pAdapter = dAdapters.Begin();
auto uSize = (DWORD) dAdapters.GetLengthBytes();
if ( GetAdaptersAddresses ( 0, 0, nullptr, pAdapter, &uSize )==NO_ERROR )
{
while ( pAdapter )
{
if ( pAdapter->IfType == IF_TYPE_ETHERNET_CSMACD && pAdapter->PhysicalAddressLength>=6 )
{
const BYTE * pMAC = pAdapter->PhysicalAddress;
for ( DWORD i=0; i<pAdapter->PhysicalAddressLength; i++ )
{
sMAC.Appendf ( "%02x", *pMAC );
pMAC++;
}
break;
}
pAdapter = pAdapter->Next;
}
}
#elif defined(__FreeBSD__)
size_t iLen = 0;
const int iMibLen = 6;
int dMib[iMibLen] = { CTL_NET, AF_ROUTE, 0, AF_LINK, NET_RT_IFLIST, 0 };
if ( sysctl ( dMib, iMibLen, NULL, &iLen, NULL, 0 )!=-1 )
{
CSphFixedVector<char> dBuf ( iLen );
if ( sysctl ( dMib, iMibLen, dBuf.Begin(), &iLen, NULL, 0 )>=0 )
{
if_msghdr * pIf = nullptr;
for ( const char * pNext = dBuf.Begin(); pNext<dBuf.Begin() + iLen; pNext+=pIf->ifm_msglen )
{
pIf = (if_msghdr *)pNext;
if ( pIf->ifm_type==RTM_IFINFO )
{
bool bAllZero = true;
const sockaddr_dl * pSdl= (const sockaddr_dl *)(pIf + 1);
const BYTE * pMAC = (const BYTE *)LLADDR(pSdl);
for ( int i=0; i<ETHER_ADDR_LEN; i++ )
{
BYTE uPart = *pMAC;
pMAC++;
bAllZero &= ( uPart==0 );
sMAC.Appendf ( "%02x", uPart );
}
if ( !bAllZero )
break;
sMAC.Clear();
sMAC.StartBlock ( ":" );
}
}
}
}
#elif defined ( __APPLE__ )
// no MAC address for OSX
#else
int iFD = socket( AF_INET, SOCK_DGRAM, 0 );
if ( iFD>=0 )
{
ifreq dIf[64];
ifconf tIfConf;
tIfConf.ifc_len = sizeof( dIf );
tIfConf.ifc_req = dIf;
if ( ioctl( iFD, SIOCGIFCONF, &tIfConf )>=0 )
{
const ifreq* pIfEnd = dIf + ( tIfConf.ifc_len / sizeof( dIf[0] ));
for ( const ifreq* pIfCur = tIfConf.ifc_req; pIfCur<pIfEnd; pIfCur++ )
{
if ( pIfCur->ifr_addr.sa_family==AF_INET )
{
ifreq tIfCur;
memset( &tIfCur, 0, sizeof( tIfCur ));
memcpy( tIfCur.ifr_name, pIfCur->ifr_name, sizeof( tIfCur.ifr_name ));
if ( ioctl( iFD, SIOCGIFHWADDR, &tIfCur )>=0 )
{
bool bAllZero = true;
const BYTE* pMAC = ( const BYTE* ) tIfCur.ifr_hwaddr.sa_data;
for ( int i = 0; i<ETHER_ADDR_LEN; i++ )
{
BYTE uPart = *pMAC;
pMAC++;
bAllZero &= ( uPart==0 );
sMAC.Appendf( "%02x", uPart );
}
if ( !bAllZero )
break;
sMAC.Clear();
sMAC.StartBlock( ":" );
}
}
}
}
}
SafeClose( iFD );
#endif
return sMAC.cstr();
}
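// Note: the address is rendered as lowercase hex octets joined by ':' (e.g., a
// hypothetical "0a:1b:2c:3d:4e:5f"); on OSX, or when no suitable interface is
// found, the result is an empty string.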
volatile bool& sphGetSeamlessRotate() noexcept
{
#if _WIN32
static bool bSeamlessRotate = false;
#else
static bool bSeamlessRotate = true;
#endif
return bSeamlessRotate;
}
bool PollOptimizeRunning ( const CSphString & sIndex )
{
while ( true )
{
Threads::Coro::SleepMsec ( 500 );
auto pTmpIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pTmpIndex ) )
return false;
RIdx_T<RtIndex_i *> pRtIndex { pTmpIndex };
if ( !pRtIndex->OptimizesRunning () )
return true;
}
}
| 41,118 | C++ | .cpp | 1,340 | 28.386567 | 194 | 0.678622 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,852 | indexsettings.cpp | manticoresoftware_manticoresearch/src/indexsettings.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "indexsettings.h"
#include "sphinxstd.h"
#include "sphinxint.h"
#include "fileutils.h"
#include "sphinxstem.h"
#include "icu.h"
#include "jieba.h"
#include "attribute.h"
#include "knnmisc.h"
#include "indexfiles.h"
#include "tokenizer/tokenizer.h"
#include "client_task_info.h"
#include "knnlib.h"
#include "secondarylib.h"
#if !_WIN32
#include <glob.h>
#endif
static CreateFilenameBuilder_fn g_fnCreateFilenameBuilder = nullptr;
static AttrEngine_e g_eAttrEngine = AttrEngine_e::ROWWISE;
void SetIndexFilenameBuilder ( CreateFilenameBuilder_fn pBuilder )
{
g_fnCreateFilenameBuilder = pBuilder;
}
CreateFilenameBuilder_fn GetIndexFilenameBuilder()
{
return g_fnCreateFilenameBuilder;
}
static inline SphWordID_t cast2wordid ( int64_t iVal )
{
return *(SphWordID_t*)&iVal;
}
static const char * BigramName ( ESphBigram eType )
{
switch ( eType )
{
case SPH_BIGRAM_ALL:
return "all";
case SPH_BIGRAM_FIRSTFREQ:
return "first_freq";
case SPH_BIGRAM_BOTHFREQ:
return "both_freq";
case SPH_BIGRAM_NONE:
default:
return "none";
}
}
CSphString CompressionToStr ( Compression_e eComp )
{
switch ( eComp )
{
case Compression_e::LZ4:
return "lz4";
case Compression_e::LZ4HC:
return "lz4hc";
case Compression_e::NONE:
default:
return "none";
}
}
//////////////////////////////////////////////////////////////////////////
struct SettingsFormatterState_t
{
FILE * m_pFile = nullptr;
StringBuilder_c * m_pBuf = nullptr;
bool m_bFirst = true;
SettingsFormatterState_t ( FILE * pFile );
SettingsFormatterState_t ( StringBuilder_c & tBuf );
};
SettingsFormatterState_t::SettingsFormatterState_t ( FILE * pFile )
: m_pFile ( pFile )
{}
SettingsFormatterState_t::SettingsFormatterState_t ( StringBuilder_c & tBuf )
: m_pBuf ( &tBuf )
{}
class SettingsFormatter_c
{
public:
SettingsFormatter_c ( SettingsFormatterState_t & tState, const char * szPrefix, const char * szEq, const char * szPostfix, const char * szSeparator, bool bIgnoreCond = false, bool bEscapeValues = false );
template <typename T>
void Add ( const char * szKey, T tVal, bool bCond );
template <typename T>
void AddEmbedded ( const char * szKey, const VecTraits_T<T> & dEmbedded, bool bCond );
private:
template <typename T>
CSphString FormatValue(T tVal);
SettingsFormatterState_t & m_tState;
CSphString m_sPrefix;
CSphString m_sEq;
CSphString m_sPostfix;
CSphString m_sSeparator;
bool m_bIgnoreCond = false;
bool m_bEscapeValues = false;
};
SettingsFormatter_c::SettingsFormatter_c ( SettingsFormatterState_t & tState, const char * szPrefix, const char * szEq, const char * szPostfix, const char * szSeparator, bool bIgnoreCond, bool bEscapeValues )
: m_tState ( tState )
, m_sPrefix ( szPrefix )
, m_sEq ( szEq )
, m_sPostfix ( szPostfix )
, m_sSeparator ( szSeparator )
, m_bIgnoreCond ( bIgnoreCond )
, m_bEscapeValues ( bEscapeValues )
{}
using SqlEscapedBuilder_c = EscapedStringBuilder_T<BaseQuotation_T<SqlQuotator_t>>;
template<typename T>
CSphString SettingsFormatter_c::FormatValue(T tVal) {
SqlEscapedBuilder_c dEscaped;
// render tVal into a plain string
CSphString sVal;
dEscaped << tVal;
dEscaped.MoveTo(sVal);
if (!m_bEscapeValues) {
// no escaping requested; return the plain rendering
return sVal;
}
// otherwise build the SQL-escaped variant of the same string
CSphString sRes;
dEscaped.AppendEscapedSkippingCommaNoQuotes(sVal.cstr());
dEscaped.MoveTo ( sRes );
return sRes;
}
template <typename T>
void SettingsFormatter_c::Add ( const char * szKey, T tVal, bool bCond )
{
if ( !m_bIgnoreCond && !bCond )
return;
if ( m_tState.m_pBuf )
{
if ( !m_tState.m_bFirst )
(*m_tState.m_pBuf) << m_sSeparator;
(*m_tState.m_pBuf) << m_sPrefix << szKey << m_sEq << FormatValue(tVal) << m_sPostfix;
}
if ( m_tState.m_pFile )
{
StringBuilder_c tBuilder;
if ( !m_tState.m_bFirst )
tBuilder << m_sSeparator;
tBuilder << m_sPrefix << szKey << m_sEq << FormatValue(tVal) << m_sPostfix;
fputs ( tBuilder.cstr(), m_tState.m_pFile );
}
m_tState.m_bFirst = false;
}
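// A minimal output sketch, assuming the readable-dump settings used by
// DumpReadable() below ( prefix "", eq ": ", postfix "", separator "\n" ):
//   Add ( "min_word_len", 2, true )  ->  "min_word_len: 2"
// A config-style formatter with, say, prefix "\t" and eq " = " (hypothetical
// values) would emit "\tmin_word_len = 2" for the same call.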
template <typename T>
void SettingsFormatter_c::AddEmbedded ( const char * szKey, const VecTraits_T<T> & dEmbedded, bool bCond )
{
CSphString sPlural;
sPlural.SetSprintf( "%ss", szKey );
Add ( sPlural.cstr(), bCond ? 1 : 0, true );
if ( bCond )
{
ARRAY_FOREACH ( i, dEmbedded )
{
CSphString sName;
sName.SetSprintf ( "%s [%d]", szKey, i );
Add ( sName.cstr(), dEmbedded[i], true );
}
}
}
//////////////////////////////////////////////////////////////////////////
void SettingsWriter_c::DumpReadable ( SettingsFormatterState_t & tState, const CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder ) const
{
SettingsFormatter_c tFormatter ( tState, "", ": ", "", "\n", true );
Format ( tFormatter, pFilenameBuilder );
}
//////////////////////////////////////////////////////////////////////////
static RtTypedAttr_t g_dRtTypedAttrs[]=
{
{ SPH_ATTR_INTEGER, "rt_attr_uint" },
{ SPH_ATTR_BIGINT, "rt_attr_bigint" },
{ SPH_ATTR_TIMESTAMP, "rt_attr_timestamp" },
{ SPH_ATTR_BOOL, "rt_attr_bool" },
{ SPH_ATTR_FLOAT, "rt_attr_float" },
{ SPH_ATTR_STRING, "rt_attr_string" },
{ SPH_ATTR_JSON, "rt_attr_json" },
{ SPH_ATTR_UINT32SET, "rt_attr_multi" },
{ SPH_ATTR_INT64SET, "rt_attr_multi_64" },
{ SPH_ATTR_FLOAT_VECTOR,"rt_attr_float_vector" }
};
int GetNumRtTypes()
{
return sizeof(g_dRtTypedAttrs)/sizeof(g_dRtTypedAttrs[0]);
}
const RtTypedAttr_t & GetRtType ( int iType )
{
return g_dRtTypedAttrs[iType];
}
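// Minimal usage sketch (hypothetical): enumerating the supported RT attribute
// config keys, e.g. to validate a user-supplied option name.
//   for ( int i = 0; i < GetNumRtTypes(); ++i )
//       printf ( "%s\n", GetRtType(i).m_szName );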
static CSphString FormatPath ( const CSphString & sFile, const FilenameBuilder_i * pFilenameBuilder )
{
if ( !pFilenameBuilder || sFile.IsEmpty() || IsPathAbsolute ( sFile ) )
return sFile;
return pFilenameBuilder->GetFullPath(sFile);
}
//////////////////////////////////////////////////////////////////////////
ESphWordpart CSphSourceSettings::GetWordpart ( const char * sField, bool bWordDict )
{
if ( bWordDict )
return SPH_WORDPART_WHOLE;
bool bPrefix = ( m_iMinPrefixLen>0 ) && ( m_dPrefixFields.IsEmpty () || m_dPrefixFields.Contains ( sField ) );
bool bInfix = ( m_iMinInfixLen>0 ) && ( m_dInfixFields.IsEmpty() || m_dInfixFields.Contains ( sField ) );
assert ( !( bPrefix && bInfix ) ); // no field must be marked both prefix and infix
if ( bPrefix )
return SPH_WORDPART_PREFIX;
if ( bInfix )
return SPH_WORDPART_INFIX;
return SPH_WORDPART_WHOLE;
}
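// Hypothetical example: with min_prefix_len=3 and prefix_fields=title, a CRC
// dict ( bWordDict=false ) yields SPH_WORDPART_PREFIX for "title" and
// SPH_WORDPART_WHOLE for every other field; with dict=keywords the early exit
// above always returns SPH_WORDPART_WHOLE.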
int CSphSourceSettings::GetMinPrefixLen ( bool bWordDict ) const
{
if ( !bWordDict )
return m_iMinPrefixLen;
if ( m_iMinPrefixLen )
return m_iMinPrefixLen;
if ( m_iMinInfixLen )
return 1;
return 0;
}
void CSphSourceSettings::SetMinPrefixLen ( int iMinPrefixLen )
{
m_iMinPrefixLen = iMinPrefixLen;
}
int CSphSourceSettings::RawMinPrefixLen () const
{
return m_iMinPrefixLen;
}
//////////////////////////////////////////////////////////////////////////
void DocstoreSettings_t::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * pFilenameBuilder ) const
{
DocstoreSettings_t tDefault;
tOut.Add ( "docstore_compression", CompressionToStr(m_eCompression), m_eCompression!=tDefault.m_eCompression );
tOut.Add ( "docstore_compression_level", m_iCompressionLevel, m_iCompressionLevel!=tDefault.m_iCompressionLevel );
tOut.Add ( "docstore_block_size", m_uBlockSize, m_uBlockSize!=tDefault.m_uBlockSize );
}
//////////////////////////////////////////////////////////////////////////
void CSphTokenizerSettings::Setup ( const CSphConfigSection & hIndex, CSphString & sWarning )
{
m_iNgramLen = Max ( hIndex.GetInt ( "ngram_len" ), 0 );
if ( hIndex ( "ngram_chars" ) )
{
if ( m_iNgramLen )
m_iType = TOKENIZER_NGRAM;
else
sWarning = "ngram_chars specified, but ngram_len=0; IGNORED";
}
m_sCaseFolding = hIndex.GetStr ( "charset_table", "non_cont" );
m_iMinWordLen = Max ( hIndex.GetInt ( "min_word_len", 1 ), 1 );
m_sNgramChars = hIndex.GetStr ( "ngram_chars" );
m_sSynonymsFile = hIndex.GetStr ( "exceptions" ); // new option name
m_sIgnoreChars = hIndex.GetStr ( "ignore_chars" );
m_sBlendChars = hIndex.GetStr ( "blend_chars" );
m_sBlendMode = hIndex.GetStr ( "blend_mode" );
// phrase boundaries
int iBoundaryStep = Max ( hIndex.GetInt ( "phrase_boundary_step" ), -1 );
if ( iBoundaryStep!=0 )
m_sBoundary = hIndex.GetStr ( "phrase_boundary" );
}
bool CSphTokenizerSettings::Load ( const FilenameBuilder_i * pFilenameBuilder, CSphReader & tReader, CSphEmbeddedFiles & tEmbeddedFiles, CSphString & sWarning )
{
m_iType = tReader.GetByte ();
if ( m_iType!=TOKENIZER_UTF8 && m_iType!=TOKENIZER_NGRAM )
{
sWarning = "can't load an old table with SBCS tokenizer";
return false;
}
m_sCaseFolding = tReader.GetString ();
m_iMinWordLen = tReader.GetDword ();
tEmbeddedFiles.m_bEmbeddedSynonyms = !!tReader.GetByte();
if ( tEmbeddedFiles.m_bEmbeddedSynonyms )
{
int nSynonyms = (int)tReader.GetDword();
tEmbeddedFiles.m_dSynonyms.Resize ( nSynonyms );
ARRAY_FOREACH ( i, tEmbeddedFiles.m_dSynonyms )
tEmbeddedFiles.m_dSynonyms[i] = tReader.GetString();
}
m_sSynonymsFile = tReader.GetString ();
CSphString sFilePath = FormatPath ( m_sSynonymsFile, pFilenameBuilder );
tEmbeddedFiles.m_tSynonymFile.Read ( tReader, sFilePath.cstr(), false, tEmbeddedFiles.m_bEmbeddedSynonyms ? NULL : &sWarning );
m_sBoundary = tReader.GetString ();
m_sIgnoreChars = tReader.GetString ();
m_iNgramLen = tReader.GetDword ();
m_sNgramChars = tReader.GetString ();
m_sBlendChars = tReader.GetString ();
m_sBlendMode = tReader.GetString();
return true;
}
bool CSphTokenizerSettings::Load ( const FilenameBuilder_i* pFilenameBuilder, const bson::Bson_c& tNode, CSphEmbeddedFiles& tEmbeddedFiles, CSphString& sWarning )
{
using namespace bson;
m_iType = (int)Int ( tNode.ChildByName ( "type" ) );
if ( m_iType != TOKENIZER_UTF8 && m_iType != TOKENIZER_NGRAM )
{
sWarning = "can't load an old table with SBCS tokenizer";
return false;
}
m_sCaseFolding = String ( tNode.ChildByName ( "case_folding" ) );
m_iMinWordLen = (int)Int ( tNode.ChildByName ( "min_word_len" ), 1 );
auto tSynonymsNode = tNode.ChildByName ( "synonyms" );
tEmbeddedFiles.m_bEmbeddedSynonyms = !IsNullNode ( tSynonymsNode );
if ( tEmbeddedFiles.m_bEmbeddedSynonyms )
{
Bson_c ( tSynonymsNode ).ForEach ( [&tEmbeddedFiles] ( const NodeHandle_t& tNode ) {
tEmbeddedFiles.m_dSynonyms.Add ( String (tNode));
} );
}
m_sSynonymsFile = String ( tNode.ChildByName ( "synonyms_file" ) );
if ( !m_sSynonymsFile.IsEmpty() )
{
CSphString sFilePath = FormatPath ( m_sSynonymsFile, pFilenameBuilder );
tEmbeddedFiles.m_tSynonymFile.Read ( tNode.ChildByName ( "syn_file_info" ), sFilePath.cstr(), false, tEmbeddedFiles.m_bEmbeddedSynonyms ? nullptr : &sWarning );
}
m_sBoundary = String ( tNode.ChildByName ( "boundary" ) );
m_sIgnoreChars = String ( tNode.ChildByName ( "ignore_chars" ) );
m_iNgramLen = (int)Int ( tNode.ChildByName ( "ngram_len" ) );
m_sNgramChars = String ( tNode.ChildByName ( "ngram_chars" ) );
m_sBlendChars = String ( tNode.ChildByName ( "blend_chars" ) );
m_sBlendMode = String ( tNode.ChildByName ( "blend_mode" ) );
return true;
}
void CSphTokenizerSettings::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * pFilenameBuilder ) const
{
bool bKnownTokenizer = ( m_iType==TOKENIZER_UTF8 || m_iType==TOKENIZER_NGRAM );
tOut.Add ( "charset_type", bKnownTokenizer ? "utf-8" : "unknown tokenizer (deprecated sbcs?)", !bKnownTokenizer );
// fixme! need unified default charset handling
tOut.Add ( "charset_table", m_sCaseFolding, !m_sCaseFolding.IsEmpty() && m_sCaseFolding!="non_cont" );
tOut.Add ( "min_word_len", m_iMinWordLen, m_iMinWordLen>1 );
tOut.Add ( "ngram_len", m_iNgramLen, m_iNgramLen && !m_sNgramChars.IsEmpty() );
tOut.Add ( "ngram_chars", m_sNgramChars, m_iNgramLen && !m_sNgramChars.IsEmpty() );
tOut.Add ( "phrase_boundary", m_sBoundary, !m_sBoundary.IsEmpty() );
tOut.Add ( "ignore_chars", m_sIgnoreChars, !m_sIgnoreChars.IsEmpty() );
tOut.Add ( "blend_chars", m_sBlendChars, !m_sBlendChars.IsEmpty() );
tOut.Add ( "blend_mode", m_sBlendMode, !m_sBlendMode.IsEmpty() );
CSphString sSynonymsFile = FormatPath ( m_sSynonymsFile, pFilenameBuilder );
tOut.Add ( "exceptions", sSynonymsFile, !sSynonymsFile.IsEmpty() );
}
void CSphTokenizerSettings::DumpReadable ( SettingsFormatterState_t & tState, const CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder ) const
{
SettingsFormatter_c tFormatter ( tState, "tokenizer-", ": ", "", "\n", true );
Format ( tFormatter, pFilenameBuilder );
tFormatter.AddEmbedded ( "embedded_exception", tEmbeddedFiles.m_dSynonyms, tEmbeddedFiles.m_bEmbeddedSynonyms );
}
//////////////////////////////////////////////////////////////////////////
void CSphDictSettings::Setup ( const CSphConfigSection & hIndex, FilenameBuilder_i * pFilenameBuilder, CSphString & sWarning )
{
m_sMorphology = hIndex.GetStr ( "morphology" );
m_sMorphFields = hIndex.GetStr ( "morphology_skip_fields" );
m_sStopwords = hIndex.GetStr ( "stopwords" );
m_iMinStemmingLen = hIndex.GetInt ( "min_stemming_len", 1 );
m_bStopwordsUnstemmed = hIndex.GetInt ( "stopwords_unstemmed" )!=0;
for ( CSphVariant * pWordforms = hIndex("wordforms"); pWordforms; pWordforms = pWordforms->m_pNext )
{
if ( !pWordforms->cstr() || !*pWordforms->cstr() )
continue;
CSphString sWordformFile = FormatPath ( pWordforms->cstr(), pFilenameBuilder );
StrVec_t dFilesFound = FindFiles ( sWordformFile.cstr() );
for ( auto & i : dFilesFound )
{
if ( pFilenameBuilder )
StripPath(i);
m_dWordforms.Add(i);
}
}
if ( hIndex("dict") )
{
m_bWordDict = true; // default to keywords
if ( hIndex["dict"]=="crc" )
m_bWordDict = false;
else if ( hIndex["dict"]!="keywords" )
sWarning.SetSprintf ( "WARNING: unknown dict=%s, defaulting to keywords\n", hIndex["dict"].cstr() );
}
}
void CSphDictSettings::Load ( CSphReader & tReader, CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder, CSphString & sWarning )
{
m_sMorphology = tReader.GetString();
m_sMorphFields = tReader.GetString();
tEmbeddedFiles.m_bEmbeddedStopwords = !!tReader.GetByte();
if ( tEmbeddedFiles.m_bEmbeddedStopwords )
{
int nStopwords = (int)tReader.GetDword();
tEmbeddedFiles.m_dStopwords.Resize ( nStopwords );
ARRAY_FOREACH ( i, tEmbeddedFiles.m_dStopwords )
tEmbeddedFiles.m_dStopwords[i] = (SphWordID_t)tReader.UnzipOffset();
}
m_sStopwords = tReader.GetString ();
int nFiles = tReader.GetDword ();
CSphString sFile;
tEmbeddedFiles.m_dStopwordFiles.Resize ( nFiles );
for ( int i = 0; i < nFiles; i++ )
{
sFile = FormatPath ( tReader.GetString (), pFilenameBuilder );
tEmbeddedFiles.m_dStopwordFiles[i].Read ( tReader, sFile.cstr(), true, tEmbeddedFiles.m_bEmbeddedStopwords ? NULL : &sWarning );
}
tEmbeddedFiles.m_bEmbeddedWordforms = !!tReader.GetByte();
if ( tEmbeddedFiles.m_bEmbeddedWordforms )
{
int nWordforms = (int)tReader.GetDword();
tEmbeddedFiles.m_dWordforms.Resize ( nWordforms );
ARRAY_FOREACH ( i, tEmbeddedFiles.m_dWordforms )
tEmbeddedFiles.m_dWordforms[i] = tReader.GetString();
}
m_dWordforms.Resize ( tReader.GetDword() );
tEmbeddedFiles.m_dWordformFiles.Resize ( m_dWordforms.GetLength() );
ARRAY_FOREACH ( i, m_dWordforms )
{
m_dWordforms[i] = tReader.GetString();
sFile = FormatPath ( m_dWordforms[i], pFilenameBuilder );
tEmbeddedFiles.m_dWordformFiles[i].Read ( tReader, sFile.cstr(), false, tEmbeddedFiles.m_bEmbeddedWordforms ? NULL : &sWarning );
}
m_iMinStemmingLen = tReader.GetDword ();
m_bWordDict = ( tReader.GetByte()!=0 );
m_bStopwordsUnstemmed = ( tReader.GetByte()!=0 );
m_sMorphFingerprint = tReader.GetString();
}
void CSphDictSettings::Load ( const bson::Bson_c& tNode, CSphEmbeddedFiles& tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder, CSphString& sWarning )
{
CSphString sFile;
using namespace bson;
m_sMorphology = String ( tNode.ChildByName ( "morphology" ) );
m_sMorphFields = String ( tNode.ChildByName ( "morph_fields" ) );
m_sStopwords = String ( tNode.ChildByName ( "stopwords" ) );
auto tStopwordsEmbedded = tNode.ChildByName ( "stopwords_list" );
tEmbeddedFiles.m_bEmbeddedStopwords = !IsNullNode ( tStopwordsEmbedded );
if ( tEmbeddedFiles.m_bEmbeddedStopwords )
Bson_c ( tStopwordsEmbedded ).ForEach ( [&tEmbeddedFiles] ( const NodeHandle_t& tNode ) {
tEmbeddedFiles.m_dStopwords.Add ( cast2wordid ( Int ( tNode ) ) );
} );
auto tWordformsEmbedded = tNode.ChildByName ( "word_forms" );
tEmbeddedFiles.m_bEmbeddedWordforms = !IsNullNode ( tWordformsEmbedded ); // fixme!
if ( tEmbeddedFiles.m_bEmbeddedWordforms )
Bson_c ( tWordformsEmbedded ).ForEach ( [&tEmbeddedFiles] ( const NodeHandle_t& tNode ) {
tEmbeddedFiles.m_dWordforms.Add ( String ( tNode ) );
} );
auto tStopwordsNode = tNode.ChildByName ( "stopwords_file_infos" );
if ( !IsNullNode ( tStopwordsNode ) )
Bson_c ( tStopwordsNode ).ForEach ( [ &tEmbeddedFiles, &sWarning, &sFile, &pFilenameBuilder ] ( const NodeHandle_t& tNode )
{
auto & tStopwordsFile = tEmbeddedFiles.m_dStopwordFiles.Add();
sFile = FormatPath ( String ( Bson_c ( tNode ).ChildByName ( "name" ) ), pFilenameBuilder );
tStopwordsFile.Read ( Bson_c ( tNode ).ChildByName ( "info" ), sFile.cstr(), true, tEmbeddedFiles.m_bEmbeddedStopwords ? nullptr : &sWarning );
} );
auto tWordformsFiles = tNode.ChildByName ( "wordforms_file_infos" );
if ( !IsNullNode ( tWordformsFiles ) )
Bson_c ( tWordformsFiles ).ForEach ( [ &tEmbeddedFiles, &sWarning, this, &sFile, &pFilenameBuilder ] ( const NodeHandle_t& tNode )
{
auto & sWordformsFileName = m_dWordforms.Add();
auto & tWordformsFile = tEmbeddedFiles.m_dWordformFiles.Add();
sWordformsFileName = String ( Bson_c ( tNode ).ChildByName ( "name" ) );
sFile = FormatPath ( sWordformsFileName, pFilenameBuilder );
tWordformsFile.Read ( Bson_c ( tNode ).ChildByName ( "info" ), sFile.cstr(), false, tEmbeddedFiles.m_bEmbeddedWordforms ? nullptr : &sWarning );
} );
m_iMinStemmingLen = (int)Int ( tNode.ChildByName ( "min_stemming_len" ), 1 );
m_bWordDict = Bool ( tNode.ChildByName ( "word_dict" ), true );
m_bStopwordsUnstemmed = Bool ( tNode.ChildByName ( "stopwords_unstemmed" ), false );
m_sMorphFingerprint = String ( tNode.ChildByName ( "morph_data_fingerprint" ) );
}
void CSphDictSettings::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * pFilenameBuilder ) const
{
tOut.Add ( "dict", m_bWordDict ? "keywords" : "crc", !m_bWordDict );
tOut.Add ( "morphology", m_sMorphology, !m_sMorphology.IsEmpty() );
tOut.Add ( "morphology_skip_fields",m_sMorphFields, !m_sMorphFields.IsEmpty() );
tOut.Add ( "min_stemming_len", m_iMinStemmingLen, m_iMinStemmingLen>1 );
tOut.Add ( "stopwords_unstemmed", 1, m_bStopwordsUnstemmed );
CSphString sStopwordsFile = FormatPath ( m_sStopwords, pFilenameBuilder );
tOut.Add ( "stopwords", sStopwordsFile, !sStopwordsFile.IsEmpty() );
StringBuilder_c sAllWordforms(" ");
for ( const auto & i : m_dWordforms )
sAllWordforms << FormatPath ( i, pFilenameBuilder );
tOut.Add ( "wordforms", sAllWordforms.cstr(), !sAllWordforms.IsEmpty() );
}
void CSphDictSettings::DumpReadable ( SettingsFormatterState_t & tState, const CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder ) const
{
SettingsFormatter_c tFormatter ( tState, "dictionary-", ": ", "", "\n", true );
Format ( tFormatter, pFilenameBuilder );
tFormatter.AddEmbedded ( "embedded_stopword", tEmbeddedFiles.m_dStopwords, tEmbeddedFiles.m_bEmbeddedStopwords );
tFormatter.AddEmbedded ( "embedded_wordform", tEmbeddedFiles.m_dWordforms, tEmbeddedFiles.m_bEmbeddedWordforms );
}
//////////////////////////////////////////////////////////////////////////
bool CSphFieldFilterSettings::Setup ( const CSphConfigSection & hIndex, CSphString & sWarning )
{
#if WITH_RE2
// regular expressions
m_dRegexps.Resize ( 0 );
for ( CSphVariant * pFilter = hIndex("regexp_filter"); pFilter; pFilter = pFilter->m_pNext )
m_dRegexps.Add ( pFilter->cstr() );
return m_dRegexps.GetLength() > 0;
#else
if ( hIndex ( "regexp_filter" ) )
sWarning.SetSprintf ( "regexp_filter specified but no regexp support compiled" );
return false;
#endif
}
void CSphFieldFilterSettings::Load ( CSphReader & tReader )
{
int nRegexps = tReader.GetDword();
if ( !nRegexps )
return;
m_dRegexps.Resize ( nRegexps );
for ( auto & i : m_dRegexps )
i = tReader.GetString();
}
void CSphFieldFilterSettings::Save ( Writer_i & tWriter ) const
{
tWriter.PutDword ( m_dRegexps.GetLength() );
for ( const auto & i : m_dRegexps )
tWriter.PutString(i);
}
void CSphFieldFilterSettings::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * /*pFilenameBuilder*/ ) const
{
for ( const auto & i : m_dRegexps )
tOut.Add ( "regexp_filter", i, !i.IsEmpty() );
}
//////////////////////////////////////////////////////////////////////////
CSphString KillListTarget_t::Format() const
{
CSphString sTarget, sFlags;
DWORD uMask = KillListTarget_t::USE_KLIST | KillListTarget_t::USE_DOCIDS;
if ( (m_uFlags & uMask) != uMask )
{
if ( m_uFlags & KillListTarget_t::USE_KLIST )
sFlags=":kl";
if ( m_uFlags & KillListTarget_t::USE_DOCIDS )
sFlags=":id";
} else
sFlags = "";
sTarget.SetSprintf ( "%s%s", m_sIndex.cstr(), sFlags.cstr() );
return sTarget;
}
bool KillListTargets_c::Parse ( const CSphString & sTargets, const char * szIndexName, CSphString & sError )
{
StrVec_t dIndexes;
sphSplit ( dIndexes, sTargets.cstr(), " \t," );
m_dTargets.Resize(dIndexes.GetLength());
ARRAY_FOREACH ( i, m_dTargets )
{
KillListTarget_t & tTarget = m_dTargets[i];
const char * szTargetName = dIndexes[i].cstr();
const char * sSplit = strstr ( szTargetName, ":" );
if ( sSplit )
{
CSphString sOptions = sSplit+1;
if ( sOptions=="kl" )
tTarget.m_uFlags = KillListTarget_t::USE_KLIST;
else if ( sOptions=="id" )
tTarget.m_uFlags = KillListTarget_t::USE_DOCIDS;
else
{
sError.SetSprintf ( "unknown kill list target option near '%s'\n", dIndexes[i].cstr() );
return false;
}
tTarget.m_sIndex = dIndexes[i].SubString ( 0, sSplit-szTargetName );
}
else
tTarget.m_sIndex = szTargetName;
if ( tTarget.m_sIndex == szIndexName )
{
sError.SetSprintf ( "cannot apply kill list to myself: killlist_target=%s\n", sTargets.cstr() );
return false;
}
}
return true;
}
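// Accepted syntax, for illustration (table names are hypothetical):
//   killlist_target = main:kl, delta:id, archive
// 'main:kl' applies only the kill-list, 'delta:id' only the document ids, and
// a bare 'archive' keeps the target's default flags.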
void KillListTargets_c::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * /*pFilenameBuilder*/ ) const
{
StringBuilder_c sTargets;
auto tComma = ScopedComma_c ( sTargets, "," );
for ( const auto & i : m_dTargets )
sTargets += i.Format().cstr();
tOut.Add ( "killlist_target", sTargets.cstr(), !sTargets.IsEmpty() );
}
//////////////////////////////////////////////////////////////////////////
void CSphIndexSettings::ParseStoredFields ( const CSphConfigSection & hIndex )
{
CSphString sFields = hIndex.GetStr ( "stored_fields", "*" );
sFields.Trim();
sFields.ToLower();
if ( sFields=="*" )
m_dStoredFields.Add("*");
else
{
sphSplit ( m_dStoredFields, sFields.cstr(), ", " );
m_dStoredFields.Uniq();
}
sFields = hIndex.GetStr ( "stored_only_fields" );
sFields.ToLower();
sphSplit ( m_dStoredOnlyFields, sFields.cstr(), ", " );
m_dStoredOnlyFields.Uniq();
}
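// Config sketch (field names hypothetical):
//   stored_fields      = title, content   # indexed and kept in the docstore
//   stored_only_fields = raw_body         # kept in the docstore, not indexed
// The default stored_fields value '*' stores every field.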
bool CSphIndexSettings::ParseColumnarSettings ( const CSphConfigSection & hIndex, CSphString & sError )
{
if ( ( hIndex.Exists("columnar_attrs") || hIndex.Exists("columnar_no_fast_fetch") || ( hIndex.Exists("engine") && hIndex["engine"]=="columnar" ) ) && !IsColumnarLibLoaded() )
{
sError = "columnar library not loaded";
return false;
}
{
CSphString sEngine = hIndex.GetStr ( "engine" );
sEngine.ToLower();
if ( !StrToAttrEngine ( m_eEngine, AttrEngine_e::DEFAULT, sEngine, sError ) )
return false;
}
{
CSphString sEngine = hIndex.GetStr ( "engine_default" );
sEngine.ToLower();
if ( !StrToAttrEngine ( m_eDefaultEngine, AttrEngine_e::ROWWISE, sEngine, sError ) )
return false;
}
{
CSphString sAttrs = hIndex.GetStr ( "columnar_attrs" );
sAttrs.Trim();
sAttrs.ToLower();
if ( sAttrs=="*" )
m_dColumnarAttrs.Add("*");
else
{
sphSplit ( m_dColumnarAttrs, sAttrs.cstr() );
m_dColumnarAttrs.Uniq();
}
}
{
CSphString sAttrs = hIndex.GetStr ( "columnar_no_fast_fetch" );
sAttrs.ToLower();
sphSplit ( m_dColumnarNonStoredAttrs, sAttrs.cstr() );
m_dColumnarNonStoredAttrs.Uniq();
}
{
CSphString sAttrs = hIndex.GetStr ( "rowwise_attrs" );
sAttrs.ToLower();
sphSplit ( m_dRowwiseAttrs, sAttrs.cstr() );
m_dRowwiseAttrs.Uniq();
}
{
CSphString sAttrs = hIndex.GetStr ( "columnar_strings_no_hash" );
sAttrs.ToLower();
sphSplit ( m_dColumnarStringsNoHash, sAttrs.cstr() );
m_dColumnarStringsNoHash.Uniq();
}
return true;
}
bool CSphIndexSettings::ParseKNNSettings ( const CSphConfigSection & hIndex, CSphString & sError )
{
if ( !hIndex.Exists("knn") )
return true;
if ( !IsKNNLibLoaded() )
{
sError = "knn library not loaded";
return false;
}
return ParseKNNConfigStr ( hIndex.GetStr("knn"), m_dKNN, sError );
}
bool CSphIndexSettings::ParseSISettings ( const CSphConfigSection & hIndex, CSphString & sError )
{
if ( !hIndex.Exists("json_secondary_indexes") )
return true;
if ( !IsSecondaryLibLoaded() )
{
sError = "secondary index library not loaded";
return false;
}
{
CSphString sAttrs = hIndex.GetStr ( "json_secondary_indexes" );
sAttrs.ToLower();
sphSplit ( m_dJsonSIAttrs, sAttrs.cstr() );
m_dJsonSIAttrs.Uniq();
}
return true;
}
bool CSphIndexSettings::ParseDocstoreSettings ( const CSphConfigSection & hIndex, CSphString & sWarning, CSphString & sError )
{
m_uBlockSize = hIndex.GetSize ( "docstore_block_size", DEFAULT_DOCSTORE_BLOCK );
m_iCompressionLevel = hIndex.GetInt ( "docstore_compression_level", DEFAULT_COMPRESSION_LEVEL );
if ( !hIndex.Exists("docstore_compression") )
return true;
CSphString sCompression = hIndex["docstore_compression"].cstr();
if ( sCompression=="none" )
m_eCompression = Compression_e::NONE;
else if ( sCompression=="lz4" )
m_eCompression = Compression_e::LZ4;
else if ( sCompression=="lz4hc" )
m_eCompression = Compression_e::LZ4HC;
else
{
sError.SetSprintf ( "unknown compression specified in 'docstore_compression': '%s'\n", sCompression.cstr() );
return false;
}
if ( hIndex.Exists("docstore_compression_level") && m_eCompression!=Compression_e::LZ4HC )
sWarning.SetSprintf ( "docstore_compression_level works only with LZ4HC compression" );
return true;
}
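// Config sketch (values hypothetical):
//   docstore_compression       = lz4hc
//   docstore_compression_level = 9    # only meaningful with lz4hc, see above
//   docstore_block_size        = 32k
// Valid compression values are 'none', 'lz4' and 'lz4hc', as parsed above.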
bool CSphIndexSettings::ParseCJKSegmentation ( const CSphConfigSection & hIndex, const StrVec_t & dMorphs, CSphString & sWarning, CSphString & sError )
{
bool bICU = dMorphs.Contains ( "icu_chinese" );
bool bJieba = dMorphs.Contains ( "jieba_chinese" );
if ( bICU && bJieba )
{
sError = "ICU and Jieba cannot both be enabled at the same time";
return false;
}
else
m_ePreprocessor = bICU ? Preprocessor_e::ICU : ( bJieba ? Preprocessor_e::JIEBA : Preprocessor_e::NONE );
if ( !sphCheckConfigICU ( *this, sError ) )
return false;
if ( !CheckConfigJieba ( *this, sError ) )
return false;
if ( hIndex.Exists("jieba_hmm") && m_ePreprocessor!=Preprocessor_e::JIEBA )
{
sError = "jieba_hmm can't be used without Jieba morphology enabled";
return false;
}
if ( hIndex.Exists("jieba_mode") && m_ePreprocessor!=Preprocessor_e::JIEBA )
{
sError = "jieba_mode can't be used without Jieba morphology enabled";
return false;
}
if ( hIndex.Exists("jieba_user_dict_path") && m_ePreprocessor!=Preprocessor_e::JIEBA )
{
sError = "jieba_user_dict_path can't be used without Jieba morphology enabled";
return false;
}
m_bJiebaHMM = hIndex.GetBool ( "jieba_hmm", true );
CSphString sJiebaMode = hIndex.GetStr ( "jieba_mode", "accurate" );
if ( !StrToJiebaMode ( m_eJiebaMode , sJiebaMode, sError ) )
return false;
if ( m_eJiebaMode==JiebaMode_e::FULL && hIndex.Exists("jieba_hmm") )
sWarning = "jieba_hmm has no effect when jieba_mode=full";
m_sJiebaUserDictPath = hIndex.GetStr("jieba_user_dict_path");
return true;
}
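// Config sketch (paths and values hypothetical): enabling Jieba segmentation.
//   morphology           = jieba_chinese
//   jieba_mode           = search
//   jieba_hmm            = 1
//   jieba_user_dict_path = /usr/local/etc/jieba_user.dict
// 'icu_chinese' and 'jieba_chinese' are mutually exclusive, as checked above.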
static const int64_t DEFAULT_ATTR_UPDATE_RESERVE = 131072;
bool CSphIndexSettings::Setup ( const CSphConfigSection & hIndex, const char * szIndexName, CSphString & sWarning, CSphString & sError )
{
// misc settings
SetMinPrefixLen ( Max ( hIndex.GetInt ( "min_prefix_len" ), 0 ) );
m_iMinInfixLen = Max ( hIndex.GetInt ( "min_infix_len" ), 0 );
m_iMaxSubstringLen = Max ( hIndex.GetInt ( "max_substring_len" ), 0 );
m_iBoundaryStep = Max ( hIndex.GetInt ( "phrase_boundary_step" ), -1 );
m_bIndexExactWords = hIndex.GetInt ( "index_exact_words" )!=0;
m_iOvershortStep = Min ( Max ( hIndex.GetInt ( "overshort_step", 1 ), 0 ), 1 );
m_iStopwordStep = Min ( Max ( hIndex.GetInt ( "stopword_step", 1 ), 0 ), 1 );
m_iEmbeddedLimit = hIndex.GetSize ( "embedded_limit", 16384 );
m_bIndexFieldLens = hIndex.GetInt ( "index_field_lengths" )!=0;
m_sIndexTokenFilter = hIndex.GetStr ( "index_token_filter" );
m_tBlobUpdateSpace = hIndex.GetSize64 ( "attr_update_reserve", DEFAULT_ATTR_UPDATE_RESERVE );
m_bBinlog = hIndex.GetBool ( "binlog", true );
if ( !m_tKlistTargets.Parse ( hIndex.GetStr ( "killlist_target" ), szIndexName, sError ) )
return false;
// prefix/infix fields
CSphString sFields;
sFields = hIndex.GetStr ( "prefix_fields" );
sFields.ToLower();
sphSplit ( m_dPrefixFields, sFields.cstr() );
sFields = hIndex.GetStr ( "infix_fields" );
sFields.ToLower();
sphSplit ( m_dInfixFields, sFields.cstr() );
ParseStoredFields(hIndex);
if ( !ParseColumnarSettings ( hIndex, sError ) )
return false;
if ( !ParseKNNSettings ( hIndex, sError ) )
return false;
if ( !ParseSISettings ( hIndex, sError ) )
return false;
if ( RawMinPrefixLen()==0 && m_dPrefixFields.GetLength()!=0 )
{
sWarning = "min_prefix_len=0, prefix_fields ignored";
m_dPrefixFields.Reset();
}
if ( m_iMinInfixLen==0 && m_dInfixFields.GetLength()!=0 )
{
sWarning = "min_infix_len=0, infix_fields ignored";
m_dInfixFields.Reset();
}
m_dPrefixFields.Uniq();
m_dInfixFields.Uniq();
for ( const auto & sField : m_dPrefixFields )
if ( m_dInfixFields.Contains(sField) )
{
sError.SetSprintf ( "field '%s' marked both as prefix and infix", sField.cstr() );
return false;
}
if ( m_iMaxSubstringLen && m_iMaxSubstringLen<m_iMinInfixLen )
{
sError.SetSprintf ( "max_substring_len=%d is less than min_infix_len=%d", m_iMaxSubstringLen, m_iMinInfixLen );
return false;
}
if ( m_iMaxSubstringLen && m_iMaxSubstringLen<RawMinPrefixLen() )
{
sError.SetSprintf ( "max_substring_len=%d is less than min_prefix_len=%d", m_iMaxSubstringLen, RawMinPrefixLen() );
return false;
}
if ( !ParseDocstoreSettings ( hIndex, sWarning, sError ) )
return false;
CSphString sIndexType = hIndex.GetStr ( "dict", "keywords" );
bool bWordDict = true;
if ( sIndexType=="crc" )
bWordDict = false;
else if ( sIndexType!="keywords" )
{
sError.SetSprintf ( "table '%s': unknown dict=%s; only 'keywords' or 'crc' values allowed", szIndexName, sIndexType.cstr() );
return false;
}
if ( hIndex("type") && hIndex["type"]=="rt" && ( m_iMinInfixLen>0 || RawMinPrefixLen()>0 ) && !bWordDict )
{
sError.SetSprintf ( "RT tables support prefixes and infixes with only dict=keywords" );
return false;
}
if ( bWordDict && m_iMaxSubstringLen>0 )
{
sError.SetSprintf ( "max_substring_len can not be used with dict=keywords" );
return false;
}
// the only way we could have both prefixes and infixes enabled is when specific field subsets are configured
if ( !bWordDict && m_iMinInfixLen>0 && RawMinPrefixLen()>0
&& ( !m_dPrefixFields.GetLength() || !m_dInfixFields.GetLength() ) )
{
sError.SetSprintf ( "prefixes and infixes can not both be enabled on all fields" );
return false;
}
// html stripping
if ( hIndex ( "html_strip" ) )
{
m_bHtmlStrip = hIndex.GetInt ( "html_strip" )!=0;
m_sHtmlIndexAttrs = hIndex.GetStr ( "html_index_attrs" );
m_sHtmlRemoveElements = hIndex.GetStr ( "html_remove_elements" );
}
// hit format
// TODO! add the description into documentation.
m_eHitFormat = SPH_HIT_FORMAT_INLINE;
if ( hIndex("hit_format") )
{
if ( hIndex["hit_format"]=="plain" ) m_eHitFormat = SPH_HIT_FORMAT_PLAIN;
else if ( hIndex["hit_format"]=="inline" ) m_eHitFormat = SPH_HIT_FORMAT_INLINE;
else
sWarning.SetSprintf ( "unknown hit_format=%s, defaulting to inline", hIndex["hit_format"].cstr() );
}
// hit-less indices
if ( hIndex("hitless_words") )
{
const CSphString & sValue = hIndex["hitless_words"].strval();
if ( sValue=="all" )
m_eHitless = SPH_HITLESS_ALL;
else
{
m_eHitless = SPH_HITLESS_SOME;
m_sHitlessFiles = sValue;
}
}
// sentence and paragraph indexing
m_bIndexSP = ( hIndex.GetInt ( "index_sp" )!=0 );
m_sZones = hIndex.GetStr ( "index_zones" );
// bigrams
m_eBigramIndex = SPH_BIGRAM_NONE;
if ( hIndex("bigram_index") )
{
CSphString s = hIndex["bigram_index"].strval();
s.ToLower();
if ( s=="all" )
m_eBigramIndex = SPH_BIGRAM_ALL;
else if ( s=="first_freq" )
m_eBigramIndex = SPH_BIGRAM_FIRSTFREQ;
else if ( s=="both_freq" )
m_eBigramIndex = SPH_BIGRAM_BOTHFREQ;
else
{
sError.SetSprintf ( "unknown bigram_index=%s (must be all, first_freq, or both_freq)", s.cstr() );
return false;
}
}
m_sBigramWords = hIndex.GetStr ( "bigram_freq_words" );
m_sBigramWords.Trim();
bool bEmptyOk = m_eBigramIndex==SPH_BIGRAM_NONE || m_eBigramIndex==SPH_BIGRAM_ALL;
if ( bEmptyOk!=m_sBigramWords.IsEmpty() )
{
sError.SetSprintf ( "bigram_index=%s, bigram_freq_words must%s be empty", hIndex["bigram_index"].cstr(),
bEmptyOk ? "" : " not" );
return false;
}
// aot
StrVec_t dMorphs;
sphSplit ( dMorphs, hIndex.GetStr ( "morphology" ).cstr() );
m_uAotFilterMask = 0;
for ( int j=0; j<AOT_LENGTH; ++j )
{
char buf_all[20];
snprintf ( buf_all, 19, "lemmatize_%s_all", AOT_LANGUAGES[j] ); //NOLINT
buf_all[19] = '\0';
ARRAY_FOREACH ( i, dMorphs )
if ( dMorphs[i]==buf_all )
{
m_uAotFilterMask |= (1UL) << j;
break;
}
}
if ( !ParseCJKSegmentation ( hIndex, dMorphs, sWarning, sError ) )
return false;
// all good
return true;
}
static void AddEngineSettings ( AttrEngine_e eEngine, SettingsFormatter_c & tOut )
{
if ( eEngine==AttrEngine_e::COLUMNAR )
tOut.Add ( "engine", "columnar", true );
else if ( eEngine==AttrEngine_e::ROWWISE )
tOut.Add ( "engine", "rowwise", true );
}
void CSphIndexSettings::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * pFilenameBuilder ) const
{
tOut.Add ( "min_prefix_len", RawMinPrefixLen(), RawMinPrefixLen()!=0 );
tOut.Add ( "min_infix_len", m_iMinInfixLen, m_iMinInfixLen!=0 );
tOut.Add ( "max_substring_len", m_iMaxSubstringLen, m_iMaxSubstringLen!=0 );
tOut.Add ( "index_exact_words", 1, m_bIndexExactWords );
tOut.Add ( "html_strip", 1, m_bHtmlStrip );
tOut.Add ( "html_index_attrs", m_sHtmlIndexAttrs, !m_sHtmlIndexAttrs.IsEmpty() );
tOut.Add ( "html_remove_elements", m_sHtmlRemoveElements, !m_sHtmlRemoveElements.IsEmpty() );
tOut.Add ( "index_zones", m_sZones, !m_sZones.IsEmpty() );
tOut.Add ( "index_field_lengths", 1, m_bIndexFieldLens );
tOut.Add ( "index_sp", 1, m_bIndexSP );
tOut.Add ( "phrase_boundary_step", m_iBoundaryStep, m_iBoundaryStep!=0 );
tOut.Add ( "stopword_step", m_iStopwordStep, m_iStopwordStep!=1 );
tOut.Add ( "overshort_step", m_iOvershortStep, m_iOvershortStep!=1 );
tOut.Add ( "bigram_index", BigramName(m_eBigramIndex), m_eBigramIndex!=SPH_BIGRAM_NONE );
tOut.Add ( "bigram_freq_words", m_sBigramWords, !m_sBigramWords.IsEmpty() );
tOut.Add ( "index_token_filter", m_sIndexTokenFilter, !m_sIndexTokenFilter.IsEmpty() );
tOut.Add ( "attr_update_reserve", m_tBlobUpdateSpace, m_tBlobUpdateSpace!=DEFAULT_ATTR_UPDATE_RESERVE );
tOut.Add ( "binlog", 0, !m_bBinlog );
if ( m_eHitless==SPH_HITLESS_ALL )
{
tOut.Add ( "hitless_words", "all", true );
} else if ( m_eHitless==SPH_HITLESS_SOME )
{
CSphString sHitlessFiles = FormatPath ( m_sHitlessFiles, pFilenameBuilder );
tOut.Add ( "hitless_words", sHitlessFiles, true );
}
AddEngineSettings ( m_eEngine, tOut );
if ( m_eEngine==AttrEngine_e::DEFAULT && m_eDefaultEngine!=GetDefaultAttrEngine() )
AddEngineSettings ( m_eDefaultEngine, tOut );
if ( m_eJiebaMode==JiebaMode_e::FULL )
tOut.Add ( "jieba_mode", "full", true );
else if ( m_eJiebaMode==JiebaMode_e::SEARCH )
tOut.Add ( "jieba_mode", "search", true );
tOut.Add ( "jieba_hmm", 0, !m_bJiebaHMM );
CSphString sJiebaDict = FormatPath ( m_sJiebaUserDictPath, pFilenameBuilder );
tOut.Add ( "jieba_user_dict_path", sJiebaDict, !sJiebaDict.IsEmpty() );
DocstoreSettings_t::Format ( tOut, pFilenameBuilder );
}
//////////////////////////////////////////////////////////////////////////
void FileAccessSettings_t::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * pFilenameBuilder ) const
{
FileAccessSettings_t tDefault;
tOut.Add ( "read_buffer_docs", m_iReadBufferDocList, m_iReadBufferDocList!=tDefault.m_iReadBufferDocList );
tOut.Add ( "read_buffer_hits", m_iReadBufferHitList, m_iReadBufferHitList!=tDefault.m_iReadBufferHitList );
tOut.Add ( "access_doclists", FileAccessName(m_eDoclist), m_eDoclist!=tDefault.m_eDoclist );
tOut.Add ( "access_hitlists", FileAccessName(m_eHitlist), m_eHitlist!=tDefault.m_eHitlist );
tOut.Add ( "access_plain_attrs", FileAccessName(m_eAttr) , m_eAttr!=tDefault.m_eAttr );
tOut.Add ( "access_blob_attrs", FileAccessName(m_eBlob) , m_eBlob!=tDefault.m_eBlob );
tOut.Add ( "access_dict", FileAccessName(m_eDict) , m_eDict!=tDefault.m_eDict );
}
//////////////////////////////////////////////////////////////////////////
static void SplitArg ( const CSphString & sValue, StrVec_t & dFiles )
{
dFiles = sphSplit ( sValue.cstr(), sValue.Length(), " \t," );
for ( auto & sFile : dFiles )
sFile.Trim();
}
bool StrToAttrEngine ( AttrEngine_e & eEngine, AttrEngine_e eDefault, const CSphString & sValue, CSphString & sError )
{
if ( sValue.IsEmpty() )
{
eEngine = eDefault;
return true;
}
if ( sValue!="columnar" && sValue!="rowwise" )
{
sError.SetSprintf ( "unknown engine: %s", sValue.cstr() );
return false;
}
if ( sValue=="columnar" )
eEngine = AttrEngine_e::COLUMNAR;
else if ( sValue=="rowwise" )
eEngine = AttrEngine_e::ROWWISE;
return true;
}
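// Minimal usage sketch: an empty value silently falls back to the caller's default.
//   AttrEngine_e eEngine; CSphString sErr;
//   StrToAttrEngine ( eEngine, AttrEngine_e::ROWWISE, "", sErr );    // true, eEngine==ROWWISE
//   StrToAttrEngine ( eEngine, AttrEngine_e::ROWWISE, "knn", sErr ); // false, sErr is set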
struct ExtFiles_t
{
StrVec_t m_dFiles;
bool m_bFilesSet = false; // was this option set?
bool m_bExtCopy = false; // copy external files to table's folder?
};
class IndexSettingsContainer_c : public IndexSettingsContainer_i
{
public:
IndexSettingsContainer_c() = default;
~IndexSettingsContainer_c() override;
bool Populate ( const CreateTableSettings_t & tCreateTable, bool bExtCopy ) override;
bool Add ( const char * szName, const CSphString & sValue ) override;
bool Add ( const CSphString & sName, const CSphString & sValue ) override;
CSphString Get ( const CSphString & sName ) const override;
bool Contains ( const char * szName ) const override;
void RemoveKeys ( const CSphString & sName ) override;
bool AddOption ( const CSphString & sName, const CSphString & sValue, bool bExtCopy ) override;
bool CheckPaths() override;
bool CopyExternalFiles ( const CSphString & sIndexPath, int iSuffix ) override;
void ResetCleanup() override;
const CSphConfigSection & AsCfg() const override;
const CSphString & GetError() const override { return m_sError; }
private:
CSphConfigSection m_hCfg;
ExtFiles_t m_tStopword;
ExtFiles_t m_tException;
ExtFiles_t m_tWordform;
ExtFiles_t m_tHitless;
ExtFiles_t m_tJiebaDict;
StrVec_t m_dCleanupFiles;
CSphString m_sError;
AttrEngine_e m_eEngine = AttrEngine_e::DEFAULT;
void SetupColumnarAttrs ( const CreateTableSettings_t & tCreateTable );
void SetupKNNAttrs ( const CreateTableSettings_t & tCreateTable );
void SetupSIAttrs ( const CreateTableSettings_t & tCreateTable );
void SetDefaults();
StrVec_t GetFiles();
bool CopyExternalFiles ( const ExtFiles_t & tExt, const CSphString & sDestPath, const char * sKeyName, int iSuffix, int & iFile );
bool CopyExternalFile ( const CSphString & sSrcFile, const CSphString & sDestPath, const char * sKeyName, int iSuffix, StringBuilder_c & sFilesOpt, int & iFile );
};
IndexSettingsContainer_i * CreateIndexSettingsContainer ()
{
return new IndexSettingsContainer_c();
}
IndexSettingsContainer_c::~IndexSettingsContainer_c()
{
for ( const auto & sFile : m_dCleanupFiles )
unlink ( sFile.cstr() );
}
void IndexSettingsContainer_c::ResetCleanup()
{
m_dCleanupFiles.Reset();
}
bool IndexSettingsContainer_c::AddOption ( const CSphString & sName, const CSphString & sValue, bool bExtCopy )
{
if ( sName=="type" && sValue=="pq" )
{
CSphString sNewValue = "percolate";
return Add ( sName, sNewValue );
}
if ( sName=="stopwords" )
{
// new value replaces previous
m_tStopword.m_dFiles.Reset();
m_tStopword.m_bExtCopy = bExtCopy;
m_tStopword.m_bFilesSet = true;
SplitArg ( sValue, m_tStopword.m_dFiles );
// m_tStopword.m_dFiles now holds the files with their old paths.
// The stopword loading code has a hack that redirects missing files to the
// pre-installed share folder; use the same hack here and copy that file too, so that we're fully standalone.
for ( auto & sFile : m_tStopword.m_dFiles )
{
if ( !sphIsReadable ( sFile ) )
{
CSphString sFilename;
sFilename.SetSprintf ( "%s/stopwords/%s", GET_FULL_SHARE_DIR (), StripPath ( sFile ).cstr() );
if ( sphIsReadable ( sFilename.cstr() ) )
{
sFile = sFilename;
} else
{
m_sError.SetSprintf ( "'stopwords' file missed %s", sFile.cstr() );
return false;
}
}
}
// will add string option after copy
return true;
}
if ( sName=="exceptions" )
{
// new value replaces previous
m_tException.m_dFiles.Reset();
m_tException.m_bExtCopy = bExtCopy;
m_tException.m_bFilesSet = true;
SplitArg ( sValue, m_tException.m_dFiles );
if ( m_tException.m_dFiles.GetLength()>1 )
{
m_sError = "'exceptions' options only supports a single file";
return false;
} else if ( m_tException.m_dFiles.IsEmpty() )
{
// needs an empty value
m_tException.m_dFiles.Add();
}
// will add string option after copy
return true;
}
if ( sName=="wordforms" )
{
// multiple wordforms files are ok - no need to reset
m_tWordform.m_bExtCopy = bExtCopy;
m_tWordform.m_bFilesSet = true;
// will add string option after copy
SplitArg ( sValue, m_tWordform.m_dFiles );
return true;
}
if ( sName=="hitless_words" && ( sValue!="none" && sValue!="all" ) )
{
// new value replaces previous
m_tHitless.m_dFiles.Reset();
m_tHitless.m_bExtCopy = bExtCopy;
m_tHitless.m_bFilesSet = true;
SplitArg ( sValue, m_tHitless.m_dFiles );
// will add string option after copy
return true;
}
if ( sName=="jieba_user_dict_path" )
{
// new value replaces previous
m_tJiebaDict.m_dFiles.Reset();
m_tJiebaDict.m_bExtCopy = bExtCopy;
m_tJiebaDict.m_bFilesSet = true;
SplitArg ( sValue, m_tJiebaDict.m_dFiles );
if ( m_tJiebaDict.m_dFiles.GetLength()>1 )
{
m_sError = "'jieba_user_dict_path' options only supports a single file";
return false;
}
else if ( m_tJiebaDict.m_dFiles.IsEmpty() )
{
// needs an empty value
m_tJiebaDict.m_dFiles.Add();
}
// will add string option after copy
return true;
}
if ( sName=="engine" )
{
if ( !StrToAttrEngine ( m_eEngine, AttrEngine_e::DEFAULT, sValue, m_sError ) )
return false;
return Add ( sName, sValue );
}
return Add ( sName, sValue );
}
void IndexSettingsContainer_c::RemoveKeys ( const CSphString & sName )
{
m_hCfg.Delete(sName);
}
void IndexSettingsContainer_c::SetupColumnarAttrs ( const CreateTableSettings_t & tCreateTable )
{
StringBuilder_c sColumnarAttrs(",");
StringBuilder_c sRowwiseAttrs(",");
StringBuilder_c sColumnarNonStored(",");
StringBuilder_c sColumnarStringsNoHash(",");
for ( const auto & i : tCreateTable.m_dAttrs )
{
const CSphColumnInfo & tAttr = i.m_tAttr;
if ( tAttr.m_eEngine==AttrEngine_e::COLUMNAR )
sColumnarAttrs << tAttr.m_sName;
else if ( tAttr.m_eEngine==AttrEngine_e::ROWWISE )
sRowwiseAttrs << tAttr.m_sName;
if ( CombineEngines ( m_eEngine, tAttr.m_eEngine )==AttrEngine_e::COLUMNAR )
{
if ( !i.m_bFastFetch )
sColumnarNonStored << tAttr.m_sName;
if ( !i.m_bStringHash )
sColumnarStringsNoHash << tAttr.m_sName;
}
}
if ( sColumnarAttrs.GetLength() )
Add ( "columnar_attrs", sColumnarAttrs.cstr() );
if ( sRowwiseAttrs.GetLength() )
Add ( "rowwise_attrs", sRowwiseAttrs.cstr() );
if ( sColumnarNonStored.GetLength() )
Add ( "columnar_no_fast_fetch", sColumnarNonStored.cstr() );
if ( sColumnarStringsNoHash.GetLength() )
Add ( "columnar_strings_no_hash", sColumnarStringsNoHash.cstr() );
}
void IndexSettingsContainer_c::SetupKNNAttrs ( const CreateTableSettings_t & tCreateTable )
{
StringBuilder_c sColumnarAttrs(",");
CSphVector<NamedKNNSettings_t> dKNNAttrs;
for ( const auto & i : tCreateTable.m_dAttrs )
if ( i.m_bKNN )
{
NamedKNNSettings_t & tNamedKNN = dKNNAttrs.Add();
(knn::IndexSettings_t&)tNamedKNN = i.m_tKNN;
tNamedKNN.m_sName = i.m_tAttr.m_sName;
}
if ( !dKNNAttrs.GetLength() )
return;
Add ( "knn", FormatKNNConfigStr(dKNNAttrs).cstr() );
}
void IndexSettingsContainer_c::SetupSIAttrs ( const CreateTableSettings_t & tCreateTable )
{
StringBuilder_c sJsonSIAttrs(",");
for ( const auto & i : tCreateTable.m_dAttrs )
if ( i.m_bIndexed )
sJsonSIAttrs << i.m_tAttr.m_sName;
if ( sJsonSIAttrs.GetLength() )
Add ( "json_secondary_indexes", sJsonSIAttrs.cstr() );
}
bool IndexSettingsContainer_c::Populate ( const CreateTableSettings_t & tCreateTable, bool bExtCopy )
{
StringBuilder_c sStoredFields(",");
StringBuilder_c sStoredOnlyFields(",");
for ( const auto & i : tCreateTable.m_dFields )
{
Add ( "rt_field", i.m_sName );
DWORD uFlags = i.m_uFieldFlags;
if ( !uFlags )
uFlags = CSphColumnInfo::FIELD_INDEXED | CSphColumnInfo::FIELD_STORED;
if ( uFlags==CSphColumnInfo::FIELD_STORED )
sStoredOnlyFields << i.m_sName;
else if ( uFlags & CSphColumnInfo::FIELD_STORED )
sStoredFields << i.m_sName;
}
Add ( "stored_fields", sStoredFields.cstr() );
if ( sStoredOnlyFields.GetLength() )
Add ( "stored_only_fields", sStoredOnlyFields.cstr() );
for ( const auto & i : tCreateTable.m_dAttrs )
for ( const auto & j : g_dRtTypedAttrs )
{
const CSphColumnInfo & tAttr = i.m_tAttr;
if ( tAttr.m_eAttrType==j.m_eType )
{
CSphString sValue;
if ( tAttr.m_eAttrType==SPH_ATTR_INTEGER && tAttr.m_tLocator.m_iBitCount!=-1 )
sValue.SetSprintf ( "%s:%d", tAttr.m_sName.cstr(), tAttr.m_tLocator.m_iBitCount );
else
sValue = tAttr.m_sName;
Add ( j.m_szName, sValue );
break;
}
}
for ( const auto & i : tCreateTable.m_dOpts )
if ( !AddOption ( i.m_sName, i.m_sValue, bExtCopy ) )
return false;
SetupColumnarAttrs(tCreateTable);
SetupKNNAttrs(tCreateTable);
SetupSIAttrs(tCreateTable);
if ( !Contains("type") )
Add ( "type", "rt" );
if ( !Contains("engine_default") && GetDefaultAttrEngine()==AttrEngine_e::COLUMNAR )
Add ( "engine_default", "columnar" );
bool bDistributed = Get("type")=="distributed";
if ( !bDistributed )
Add ( "embedded_limit", "0" );
SetDefaults();
return CheckPaths();
}
bool IndexSettingsContainer_c::Add ( const char * szName, const CSphString & sValue )
{
// same behavior as an ordinary config parser
m_hCfg.AddEntry ( szName, sValue.cstr() );
return true;
}
bool IndexSettingsContainer_c::Add ( const CSphString & sName, const CSphString & sValue )
{
return Add ( sName.cstr(), sValue );
}
CSphString IndexSettingsContainer_c::Get ( const CSphString & sName ) const
{
if ( !Contains ( sName.cstr() ) )
return "";
return m_hCfg[sName].strval();
}
bool IndexSettingsContainer_c::Contains ( const char * szName ) const
{
return m_hCfg.Exists(szName);
}
StrVec_t IndexSettingsContainer_c::GetFiles()
{
StrVec_t dFiles;
for ( const auto & i : m_tStopword.m_dFiles )
dFiles.Add ( i );
for ( const auto & i : m_tException.m_dFiles )
dFiles.Add ( i );
for ( const auto & i : m_tWordform.m_dFiles )
{
StrVec_t dFilesFound = FindFiles ( i.cstr() );
for ( const auto & j : dFilesFound )
dFiles.Add(j);
// a missing wordforms file without a wildcard should fail CREATE TABLE
if ( dFilesFound.IsEmpty() && !HasWildcards ( i.cstr() ) )
{
m_sError.SetSprintf ( "file not found: '%s'", i.cstr() );
return StrVec_t();
}
}
for ( const auto & i : m_tHitless.m_dFiles )
dFiles.Add(i);
for ( const auto & i : m_tJiebaDict.m_dFiles )
dFiles.Add(i);
return dFiles;
}
const CSphConfigSection & IndexSettingsContainer_c::AsCfg() const
{
return m_hCfg;
}
// TODO: read defaults from file or predefined templates
static std::pair<const char* , const char *> g_dIndexSettingsDefaults[] =
{
{ "charset_table", "non_cont" }
};
void IndexSettingsContainer_c::SetDefaults()
{
for ( const auto & tItem : g_dIndexSettingsDefaults )
{
if ( !m_hCfg.Exists ( tItem.first ) )
Add ( tItem.first, tItem.second );
}
}
bool IndexSettingsContainer_c::CheckPaths()
{
StrVec_t dFiles = GetFiles();
if ( !m_sError.IsEmpty() )
return false;
for ( const auto & i : dFiles )
{
if ( i.IsEmpty() )
continue;
if ( HasWildcards ( i.cstr() ) && FindFiles ( i.cstr(), false ).IsEmpty() )
{
m_sError.SetSprintf ( "file not found: '%s'", i.cstr() );
return false;
}
if ( !sphIsReadable(i) )
{
m_sError.SetSprintf ( "file not found: '%s'", i.cstr() );
return false;
}
if ( !IsPathAbsolute(i) )
{
m_sError.SetSprintf ( "paths to external files should be absolute: '%s'" , i.cstr() );
return false;
}
}
return true;
}
bool IndexSettingsContainer_c::CopyExternalFiles ( const CSphString & sIndexPath, int iSuffix )
{
int iFile = 0;
if ( !CopyExternalFiles ( m_tStopword, sIndexPath, "stopwords", iSuffix, iFile ) )
return false;
iFile = 0;
if ( !CopyExternalFiles ( m_tException, sIndexPath, "exceptions", iSuffix, iFile ) )
return false;
iFile = 0;
if ( !CopyExternalFiles ( m_tHitless, sIndexPath, "hitless_words", iSuffix, iFile ) )
return false;
iFile = 0;
if ( !CopyExternalFiles ( m_tJiebaDict, sIndexPath, "jieba_user_dict_path", iSuffix, iFile ) )
return false;
if ( m_tWordform.m_bFilesSet )
{
int iFile = 0;
ExtFiles_t tFiles;
tFiles.m_bFilesSet = true;
tFiles.m_bExtCopy = m_tWordform.m_bExtCopy;
for ( const auto & sWordformFiles : m_tWordform.m_dFiles )
{
StrVec_t dFiles = FindFiles ( sWordformFiles.cstr() );
for ( const auto & sSingleFile : dFiles )
{
tFiles.m_dFiles.Reset();
tFiles.m_dFiles.Add ( sSingleFile );
if ( !CopyExternalFiles ( tFiles, sIndexPath, "wordforms", iSuffix, iFile ) )
return false;
}
}
}
return true;
}
bool IndexSettingsContainer_c::CopyExternalFile ( const CSphString & sSrcFile, const CSphString & sDestPath, const char * sKeyName, int iSuffix, StringBuilder_c & sFilesOpt, int & iFile )
{
CSphString sDstFile;
do
{
sDstFile.SetSprintf ( "%s/%s_chunk%d_%d.txt", sDestPath.cstr(), sKeyName, iSuffix, iFile );
iFile++;
} while ( sphIsReadable ( sDstFile.cstr() ) );
if ( !CopyFile ( sSrcFile, sDstFile, m_sError ) )
return false;
m_dCleanupFiles.Add ( sDstFile );
sFilesOpt << StripPath ( sDstFile ).cstr();
return true;
}
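// Destination names follow "<key>_chunk<suffix>_<n>.txt", bumping <n> until an
// unused name is found; e.g. a second stopwords file copied with iSuffix=3 would
// land in (hypothetically) "stopwords_chunk3_1.txt".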
bool IndexSettingsContainer_c::CopyExternalFiles ( const ExtFiles_t & tExt, const CSphString & sDestPath, const char * sKeyName, int iSuffix, int & iFile )
{
if ( !tExt.m_bFilesSet )
return true;
StringBuilder_c sFilesOpt ( " " );
if ( tExt.m_bExtCopy )
{
for ( const auto & sSrcFile : tExt.m_dFiles )
{
if ( sSrcFile.IsEmpty() )
continue;
if ( !CopyExternalFile ( sSrcFile, sDestPath, sKeyName, iSuffix, sFilesOpt, iFile ) )
return false;
}
} else
{
for ( const auto & sSrcFile : tExt.m_dFiles )
{
CSphString sDstFile = sSrcFile;
sFilesOpt << StripPath ( sDstFile ).cstr();
}
}
Add ( sKeyName, sFilesOpt.cstr() );
return true;
}
//////////////////////////////////////////////////////////////////////////
static void WriteFileInfo ( Writer_i & tWriter, const CSphSavedFile & tInfo )
{
tWriter.PutOffset ( tInfo.m_uSize );
tWriter.PutOffset ( tInfo.m_uCTime );
tWriter.PutOffset ( tInfo.m_uMTime );
tWriter.PutDword ( tInfo.m_uCRC32 );
}
void operator<< ( JsonEscapedBuilder & tOut, const CSphSavedFile & tInfo )
{
auto _ = tOut.Object ();
tOut.NamedValNonDefault ( "size", tInfo.m_uSize );
tOut.NamedValNonDefault ( "ctime", tInfo.m_uCTime );
tOut.NamedValNonDefault ( "mtime", tInfo.m_uMTime );
tOut.NamedValNonDefault ( "crc32", tInfo.m_uCRC32 );
}
/// gets called from and MUST be kept in sync with RtIndex_c::SaveDiskHeader()!
/// note that SaveDiskHeader() occasionally uses some PREVIOUS format version!
void SaveTokenizerSettings ( Writer_i & tWriter, const TokenizerRefPtr_c & pTokenizer, int iEmbeddedLimit )
{
assert ( pTokenizer );
const CSphTokenizerSettings & tSettings = pTokenizer->GetSettings ();
tWriter.PutByte ( tSettings.m_iType );
tWriter.PutString ( tSettings.m_sCaseFolding.cstr() );
tWriter.PutDword ( tSettings.m_iMinWordLen );
bool bEmbedSynonyms = ( iEmbeddedLimit>0 && pTokenizer->GetSynFileInfo ().m_uSize<=(SphOffset_t)iEmbeddedLimit );
tWriter.PutByte ( bEmbedSynonyms ? 1 : 0 );
if ( bEmbedSynonyms )
pTokenizer->WriteSynonyms ( tWriter );
tWriter.PutString ( tSettings.m_sSynonymsFile.cstr() );
WriteFileInfo ( tWriter, pTokenizer->GetSynFileInfo () );
tWriter.PutString ( tSettings.m_sBoundary.cstr() );
tWriter.PutString ( tSettings.m_sIgnoreChars.cstr() );
tWriter.PutDword ( tSettings.m_iNgramLen );
tWriter.PutString ( tSettings.m_sNgramChars.cstr() );
tWriter.PutString ( tSettings.m_sBlendChars.cstr() );
tWriter.PutString ( tSettings.m_sBlendMode.cstr() );
}
void SaveTokenizerSettings ( JsonEscapedBuilder& tOut, const TokenizerRefPtr_c& pTokenizer, int iEmbeddedLimit )
{
auto _ = tOut.ObjectW();
const CSphTokenizerSettings& tSettings = pTokenizer->GetSettings();
tOut.NamedVal ( "type", tSettings.m_iType );
tOut.NamedStringNonEmpty( "case_folding", tSettings.m_sCaseFolding );
tOut.NamedValNonDefault ( "min_word_len", tSettings.m_iMinWordLen, 1);
bool bEmbedSynonyms = ( iEmbeddedLimit>0 && pTokenizer->GetSynFileInfo ().m_uSize<=(SphOffset_t)iEmbeddedLimit );
if ( bEmbedSynonyms )
pTokenizer->WriteSynonyms ( tOut );
if ( !tSettings.m_sSynonymsFile.IsEmpty() )
{
tOut.NamedString ( "synonyms_file", tSettings.m_sSynonymsFile );
tOut.NamedVal ( "syn_file_info", pTokenizer->GetSynFileInfo() );
}
tOut.NamedStringNonEmpty ( "boundary", tSettings.m_sBoundary );
tOut.NamedStringNonEmpty ( "ignore_chars", tSettings.m_sIgnoreChars );
tOut.NamedValNonDefault ( "ngram_len", tSettings.m_iNgramLen );
tOut.NamedStringNonEmpty ( "ngram_chars", tSettings.m_sNgramChars );
tOut.NamedStringNonEmpty ( "blend_chars", tSettings.m_sBlendChars );
tOut.NamedStringNonEmpty ( "blend_mode", tSettings.m_sBlendMode );
}
void operator<< ( JsonEscapedBuilder& tOut, const CSphFieldFilterSettings& tFieldFilterSettings )
{
auto _ = tOut.Array();
for ( const auto& i : tFieldFilterSettings.m_dRegexps )
tOut.FixupSpacedAndAppendEscaped(i.cstr());
}
/// gets called from, and MUST be in sync with, RtIndex_c::SaveDiskHeader()!
/// note that SaveDiskHeader() occasionally uses some PREVIOUS format version!
void SaveDictionarySettings ( Writer_i & tWriter, const DictRefPtr_c & pDict, bool bForceWordDict, int iEmbeddedLimit )
{
assert ( pDict );
const CSphDictSettings & tSettings = pDict->GetSettings ();
tWriter.PutString ( tSettings.m_sMorphology.cstr() );
tWriter.PutString ( tSettings.m_sMorphFields.cstr() );
const CSphVector <CSphSavedFile> & dSWFileInfos = pDict->GetStopwordsFileInfos ();
SphOffset_t uTotalSize = 0;
ARRAY_FOREACH ( i, dSWFileInfos )
uTotalSize += dSWFileInfos[i].m_uSize;
// embed only in case it is allowed
bool bEmbedStopwords = ( iEmbeddedLimit>0 && uTotalSize<=(SphOffset_t)iEmbeddedLimit );
tWriter.PutByte ( bEmbedStopwords ? 1 : 0 );
if ( bEmbedStopwords )
pDict->WriteStopwords ( tWriter );
tWriter.PutString ( tSettings.m_sStopwords.cstr() );
tWriter.PutDword ( dSWFileInfos.GetLength () );
ARRAY_FOREACH ( i, dSWFileInfos )
{
tWriter.PutString ( dSWFileInfos[i].m_sFilename.cstr() );
WriteFileInfo ( tWriter, dSWFileInfos[i] );
}
const CSphVector <CSphSavedFile> & dWFFileInfos = pDict->GetWordformsFileInfos ();
uTotalSize = 0;
ARRAY_FOREACH ( i, dWFFileInfos )
uTotalSize += dWFFileInfos[i].m_uSize;
bool bEmbedWordforms = uTotalSize<=(SphOffset_t)iEmbeddedLimit;
tWriter.PutByte ( bEmbedWordforms ? 1 : 0 );
if ( bEmbedWordforms )
pDict->WriteWordforms ( tWriter );
tWriter.PutDword ( dWFFileInfos.GetLength() );
ARRAY_FOREACH ( i, dWFFileInfos )
{
tWriter.PutString ( tSettings.m_dWordforms[i] );
WriteFileInfo ( tWriter, dWFFileInfos[i] );
}
tWriter.PutDword ( tSettings.m_iMinStemmingLen );
tWriter.PutByte ( tSettings.m_bWordDict || bForceWordDict );
tWriter.PutByte ( tSettings.m_bStopwordsUnstemmed );
tWriter.PutString ( pDict->GetMorphDataFingerprint() );
}
void SaveDictionarySettings ( JsonEscapedBuilder& tOut, const DictRefPtr_c& pDict, bool bForceWordDict, int iEmbeddedLimit )
{
assert ( pDict );
auto _ = tOut.ObjectW();
const CSphDictSettings& tSettings = pDict->GetSettings();
tOut.NamedStringNonEmpty ( "morphology", tSettings.m_sMorphology );
tOut.NamedStringNonEmpty ( "morph_fields", tSettings.m_sMorphFields );
tOut.NamedStringNonEmpty ( "stopwords", tSettings.m_sStopwords );
SphOffset_t uTotalSize = 0;
const auto& dStopwordsInfos = pDict->GetStopwordsFileInfos();
if ( !dStopwordsInfos.IsEmpty() )
{
tOut.Named ( "stopwords_file_infos" );
auto _ = tOut.ArrayW();
for ( const auto& tInfo: dStopwordsInfos )
if ( !tInfo.m_sFilename.IsEmpty() )
{
auto _ = tOut.Object();
tOut.NamedString ( "name", tInfo.m_sFilename );
tOut.NamedVal ( "info", tInfo );
uTotalSize += tInfo.m_uSize;
}
}
// embed only in case it is allowed
if ( iEmbeddedLimit > 0 && uTotalSize <= (SphOffset_t)iEmbeddedLimit )
pDict->WriteStopwords ( tOut );
uTotalSize = 0;
const auto& dWordformsInfos = pDict->GetWordformsFileInfos();
if ( !dWordformsInfos.IsEmpty() )
{
tOut.Named ( "wordforms_file_infos" );
auto _ = tOut.ArrayW();
ARRAY_FOREACH ( i, dWordformsInfos )
{
const auto& tInfo = dWordformsInfos[i];
if ( !tInfo.m_sFilename.IsEmpty() )
{
auto _ = tOut.Object();
tOut.NamedString ( "name", tSettings.m_dWordforms[i] ); // trick! tInfo.m_sFilename contains full path, but we need tSettings.m_dWordforms is stripped one
tOut.NamedVal ( "info", tInfo );
uTotalSize += tInfo.m_uSize;
}
}
}
// embed only in case it is allowed
if ( iEmbeddedLimit > 0 && uTotalSize <= (SphOffset_t)iEmbeddedLimit )
pDict->WriteWordforms ( tOut );
tOut.NamedValNonDefault ( "min_stemming_len", tSettings.m_iMinStemmingLen, 1 );
tOut.NamedValNonDefault ( "word_dict", tSettings.m_bWordDict || bForceWordDict, true );
tOut.NamedValNonDefault ( "stopwords_unstemmed", tSettings.m_bStopwordsUnstemmed, false );
tOut.NamedStringNonEmpty ( "morph_data_fingerprint", pDict->GetMorphDataFingerprint() );
}
//////////////////////////////////////////////////////////////////////////
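/// feed all table settings (index, field filter, killlist targets, tokenizer, dictionary, mutable) to the given formatter;
/// shared by the Dump* flavors below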
static void FormatAllSettings ( const CSphIndex & tIndex, SettingsFormatter_c & tFormatter, FilenameBuilder_i * pFilenameBuilder )
{
if ( tIndex.IsPQ() )
tFormatter.Add ( "type", "pq", true );
tIndex.GetSettings().Format ( tFormatter, pFilenameBuilder );
CSphFieldFilterSettings tFieldFilter;
tIndex.GetFieldFilterSettings ( tFieldFilter );
tFieldFilter.Format ( tFormatter, pFilenameBuilder );
KillListTargets_c tKlistTargets;
CSphString sWarning;
if ( !tIndex.LoadKillList ( nullptr, tKlistTargets, sWarning ) )
tKlistTargets.m_dTargets.Reset();
tKlistTargets.Format ( tFormatter, pFilenameBuilder );
auto pTokenizer = tIndex.GetTokenizer();
if ( pTokenizer )
pTokenizer->GetSettings().Format ( tFormatter, pFilenameBuilder );
auto pDict = tIndex.GetDictionary();
if ( pDict )
pDict->GetSettings().Format ( tFormatter, pFilenameBuilder );
tIndex.GetMutableSettings().Format ( tFormatter, pFilenameBuilder );
if ( tIndex.m_iTID==-1 )
tFormatter.Add ( "binlog", "0", true );
}
// fixme! this is basically a duplicate of the above function, but has extra code due to embedded files handling
void DumpReadable ( FILE * fp, const CSphIndex & tIndex, const CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder )
{
SettingsFormatterState_t tState(fp);
tIndex.GetSettings().DumpReadable ( tState, tEmbeddedFiles, pFilenameBuilder );
CSphFieldFilterSettings tFieldFilter;
tIndex.GetFieldFilterSettings ( tFieldFilter );
tFieldFilter.DumpReadable ( tState, tEmbeddedFiles, pFilenameBuilder );
KillListTargets_c tKlistTargets;
CSphString sWarning;
if ( !tIndex.LoadKillList ( nullptr, tKlistTargets, sWarning ) )
tKlistTargets.m_dTargets.Reset();
tKlistTargets.DumpReadable ( tState, tEmbeddedFiles, pFilenameBuilder );
auto pTokenizer = tIndex.GetTokenizer();
if ( pTokenizer )
pTokenizer->GetSettings().DumpReadable ( tState, tEmbeddedFiles, pFilenameBuilder );
auto pDict = tIndex.GetDictionary();
if ( pDict )
pDict->GetSettings().DumpReadable ( tState, tEmbeddedFiles, pFilenameBuilder );
tIndex.GetMutableSettings().m_tFileAccess.DumpReadable ( tState, tEmbeddedFiles, pFilenameBuilder );
}
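/// dump settings as plain "key = value" lines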
void DumpSettings ( StringBuilder_c & tBuf, const CSphIndex & tIndex, FilenameBuilder_i * pFilenameBuilder )
{
SettingsFormatterState_t tState(tBuf);
SettingsFormatter_c tFormatter ( tState, "", " = ", "", "\n" );
FormatAllSettings ( tIndex, tFormatter, pFilenameBuilder );
}
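/// dump settings in sphinx.conf style (tab-indented "key = value" lines)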
void DumpSettingsCfg ( FILE * fp, const CSphIndex & tIndex, FilenameBuilder_i * pFilenameBuilder )
{
SettingsFormatterState_t tState(fp);
SettingsFormatter_c tFormatter ( tState, "\t", " = ", "", "\n" );
FormatAllSettings ( tIndex, tFormatter, pFilenameBuilder );
}
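/// dump settings as space-separated key='value' pairs, as used by SHOW CREATE TABLE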
static void DumpCreateTable ( StringBuilder_c & tBuf, const CSphIndex & tIndex, FilenameBuilder_i * pFilenameBuilder )
{
SettingsFormatterState_t tState(tBuf);
SettingsFormatter_c tFormatter ( tState, "", "='", "'", " ", false, true );
FormatAllSettings ( tIndex, tFormatter, pFilenameBuilder );
}
//////////////////////////////////////////////////////////////////////////
static void AddWarning ( StrVec_t & dWarnings, const CSphString & sWarning )
{
if ( !sWarning.IsEmpty() )
dWarnings.Add(sWarning);
}
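/// spawn any missing tokenizer/dictionary/field filter from the config section and fix up index_exact_words to match the morphology setup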
bool sphFixupIndexSettings ( CSphIndex * pIndex, const CSphConfigSection & hIndex, bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings, CSphString & sError )
{
bool bTokenizerSpawned = false;
if ( !pIndex->GetTokenizer () )
{
CSphTokenizerSettings tSettings;
CSphString sWarning;
tSettings.Setup ( hIndex, sWarning );
AddWarning ( dWarnings, sWarning );
TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tSettings, nullptr, pFilenameBuilder, dWarnings, sError );
if ( !pTokenizer )
return false;
bTokenizerSpawned = true;
pIndex->SetTokenizer ( pTokenizer );
}
if ( !pIndex->GetDictionary () )
{
CSphDictSettings tSettings;
CSphString sWarning;
tSettings.Setup ( hIndex, pFilenameBuilder, sWarning );
AddWarning ( dWarnings, sWarning );
DictRefPtr_c pDict = sphCreateDictionaryCRC ( tSettings, nullptr, pIndex->GetTokenizer (), pIndex->GetName(), bStripPath, pIndex->GetSettings().m_iSkiplistBlockSize, pFilenameBuilder, sError );
if ( !pDict )
return false;
pIndex->SetDictionary ( std::move ( pDict ) );
}
if ( bTokenizerSpawned )
Tokenizer::AddToMultiformFilterTo ( pIndex->ModifyTokenizer(), pIndex->GetDictionary ()->GetMultiWordforms () );
if ( !pIndex->GetFieldFilter() )
{
CSphFieldFilterSettings tFilterSettings;
bool bSetupOk = tFilterSettings.Setup ( hIndex, sError );
// treat warnings as errors
if ( !sError.IsEmpty() )
return false;
std::unique_ptr<ISphFieldFilter> pFieldFilter;
if ( bSetupOk )
{
CSphString sWarning;
pFieldFilter = sphCreateRegexpFilter ( tFilterSettings, sWarning );
AddWarning ( dWarnings, sWarning );
}
CSphString sWarning;
sphSpawnFilterICU ( pFieldFilter, pIndex->GetSettings(), pIndex->GetTokenizer()->GetSettings(), pIndex->GetName(), sWarning );
SpawnFilterJieba ( pFieldFilter, pIndex->GetSettings(), pIndex->GetTokenizer()->GetSettings(), pIndex->GetName(), pFilenameBuilder, sWarning );
AddWarning ( dWarnings, sWarning );
pIndex->SetFieldFilter ( std::move ( pFieldFilter ) );
}
// exact words fixup, needed for RT indexes
// cloned from indexer, remove somehow?
DictRefPtr_c pDict = pIndex->GetDictionary();
assert ( pDict );
CSphIndexSettings tSettings = pIndex->GetSettings ();
bool bNeedExact = ( pDict->HasMorphology() || pDict->GetWordformsFileInfos().GetLength() || pIndex->GetMutableSettings().m_iExpandKeywords );
if ( tSettings.m_bIndexExactWords && !bNeedExact )
{
tSettings.m_bIndexExactWords = false;
pIndex->Setup ( tSettings );
dWarnings.Add ( "no morphology, index_exact_words=1 has no effect, ignoring" );
}
if ( !tSettings.m_bIndexExactWords && ForceExactWords ( pDict->GetSettings().m_bWordDict, pDict->HasMorphology(), tSettings.RawMinPrefixLen(), tSettings.m_iMinInfixLen, pDict->GetSettings().m_sMorphFields.IsEmpty() ) )
{
tSettings.m_bIndexExactWords = true;
pIndex->Setup ( tSettings );
dWarnings.Add ( "dict=keywords and prefixes and morphology enabled, forcing index_exact_words=1" );
}
pIndex->PostSetup();
return true;
}
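/// dict=keywords with morphology plus prefix/infix indexing (or per-field morphology) requires index_exact_words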
bool ForceExactWords ( bool bWordDict, bool bHasMorphology, int iMinPrefixLen, int iMinInfixLen, bool bMorphFieldsEmpty )
{
return ( bWordDict && bHasMorphology && ( iMinPrefixLen || iMinInfixLen || !bMorphFieldsEmpty ) );
}
static RtTypedAttr_t g_dTypeNames[] =
{
{ SPH_ATTR_INTEGER, "integer" },
{ SPH_ATTR_BIGINT, "bigint" },
{ SPH_ATTR_FLOAT, "float" },
{ SPH_ATTR_BOOL, "bool" },
{ SPH_ATTR_UINT32SET, "multi" },
{ SPH_ATTR_INT64SET, "multi64" },
{ SPH_ATTR_JSON, "json" },
{ SPH_ATTR_STRING, "string" },
{ SPH_ATTR_STRINGPTR, "string" },
{ SPH_ATTR_TIMESTAMP, "timestamp" },
{ SPH_ATTR_FLOAT_VECTOR, "float_vector" }
};
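/// map an attribute to its DDL type name; narrow integers are reported as bit(N)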
static CSphString GetAttrTypeName ( const CSphColumnInfo & tAttr )
{
if ( tAttr.m_eAttrType==SPH_ATTR_INTEGER && tAttr.m_tLocator.m_iBitCount!=32 )
{
CSphString sRes;
sRes.SetSprintf( "bit(%d)", tAttr.m_tLocator.m_iBitCount );
return sRes;
}
for ( const auto & i : g_dTypeNames )
if ( tAttr.m_eAttrType==i.m_eType )
return i.m_szName;
assert ( 0 && "Internal error: unknown attr type" );
return "";
}
static void AddFieldSettings ( StringBuilder_c & sRes, const CSphColumnInfo & tField )
{
DWORD uAllSet = CSphColumnInfo::FIELD_INDEXED | CSphColumnInfo::FIELD_STORED;
if ( (tField.m_uFieldFlags & uAllSet) != uAllSet )
{
if ( tField.m_uFieldFlags & CSphColumnInfo::FIELD_INDEXED )
sRes << " indexed";
if ( tField.m_uFieldFlags & CSphColumnInfo::FIELD_STORED )
sRes << " stored";
}
}
static void AddStorageSettings ( StringBuilder_c & sRes, const CSphColumnInfo & tAttr, const CSphIndex & tIndex, bool bField, int iNumColumnar )
{
if ( !bField && tAttr.m_eAttrType==SPH_ATTR_STRING )
sRes << " attribute";
bool bColumnar = CombineEngines ( tIndex.GetSettings().m_eEngine, tAttr.m_eEngine )==AttrEngine_e::COLUMNAR;
if ( bColumnar )
{
if ( tAttr.m_eAttrType!=SPH_ATTR_JSON && !(tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED) && iNumColumnar>1 )
sRes << " fast_fetch='0'";
if ( tAttr.m_eAttrType==SPH_ATTR_STRING && !(tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_COLUMNAR_HASHES) )
sRes << " hash='0'";
}
}
static void AddEngineSettings ( StringBuilder_c & sRes, const CSphColumnInfo & tAttr )
{
if ( tAttr.m_eEngine==AttrEngine_e::COLUMNAR )
sRes << " engine='columnar'";
else if ( tAttr.m_eEngine==AttrEngine_e::ROWWISE )
sRes << " engine='rowwise'";
}
static void AddSISettings ( StringBuilder_c & sRes, const CSphColumnInfo & tAttr )
{
if ( tAttr.IsIndexedSI() )
sRes << " secondary_index='1'";
}
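/// case-insensitive check whether a name clashes with a reserved DDL keyword (such names must be backtick-quoted)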
static bool IsDDLToken ( const CSphString & sTok )
{
static const CSphString dTokens[] =
{
"ADD",
"ALTER",
"AS",
"AT",
"ATTRIBUTE",
"BIGINT",
"BIT",
"BOOL",
"CLUSTER",
"COLUMN",
"COLUMNAR",
"CREATE",
"DOUBLE",
"DROP",
"ENGINE",
"EXISTS",
"FAST_FETCH",
"FLOAT",
"FROM",
"FUNCTION",
"HASH",
"IMPORT",
"INDEXED",
"INTEGER",
"INT",
"IF",
"JOIN",
"JSON",
"KILLLIST_TARGET",
"LIKE",
"MULTI",
"MULTI64",
"NOT",
"PLUGIN",
"REBUILD",
"RECONFIGURE",
"RETURNS",
"RTINDEX",
"SECONDARY",
"SONAME",
"STORED",
"STRING",
"TABLE",
"TEXT",
"TIMESTAMP",
"TYPE",
"UINT",
"UPDATE"
};
CSphString sToken = sTok;
sToken.ToUpper();
return any_of ( dTokens, [&sToken] ( const auto& i ) { return i == sToken; } );
}
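/// format a single attribute definition for CREATE TABLE, including storage, engine, KNN and secondary index options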
static CSphString FormatCreateTableAttr ( const CSphColumnInfo & tAttr, const CSphIndex * pIndex, int iNumColumnar, bool bQuote )
{
StringBuilder_c sRes;
CSphString sQuotedName;
bQuote |= IsDDLToken ( tAttr.m_sName );
if ( bQuote )
sQuotedName.SetSprintf ( "`%s`", tAttr.m_sName.cstr() );
else
sQuotedName = tAttr.m_sName;
sRes << sQuotedName << " " << GetAttrTypeName(tAttr);
AddStorageSettings ( sRes, tAttr, *pIndex, false, iNumColumnar );
AddEngineSettings ( sRes, tAttr );
AddKNNSettings ( sRes, tAttr );
AddSISettings ( sRes, tAttr );
return sRes.cstr();
}
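/// format a single field definition for CREATE TABLE; string fields that double as attributes also get the attribute options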
static CSphString FormatCreateTableField ( const CSphColumnInfo & tField, const CSphIndex * pIndex, const CSphSchema & tSchema, int iNumColumnar, bool bQuote )
{
StringBuilder_c sRes;
CSphString sQuotedName;
bQuote |= IsDDLToken ( tField.m_sName );
if ( bQuote )
sQuotedName.SetSprintf ( "`%s`", tField.m_sName.cstr() );
else
sQuotedName = tField.m_sName;
const CSphColumnInfo * pAttr = tSchema.GetAttr ( tField.m_sName.cstr() );
bool bAttr = pAttr && pAttr->m_eAttrType==SPH_ATTR_STRING;
sRes << sQuotedName << ( bAttr ? " string" : " text" );
AddFieldSettings ( sRes, tField );
if ( bAttr )
{
sRes << " attribute";
AddStorageSettings ( sRes, *pAttr, *pIndex, true, iNumColumnar );
AddEngineSettings ( sRes, *pAttr );
}
return sRes.cstr();
}
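/// build the full CREATE TABLE statement: id attribute first, then fields, then the remaining attributes, then table-level options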
CSphString BuildCreateTable ( const CSphString & sName, const CSphIndex * pIndex, const CSphSchema & tSchema )
{
assert ( pIndex );
auto& tSess = session::Info();
bool bQuote = tSess.GetSqlQuoteShowCreate();
int iNumColumnar = 0;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
if ( tSchema.GetAttr(i).IsColumnar() )
iNumColumnar++;
StringBuilder_c sRes;
sRes << "CREATE TABLE " << ( bQuote ? SphSprintf ( "`%s`", sName.cstr() ) : sName) << " (\n";
CSphVector<const CSphColumnInfo *> dExcludeAttrs;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const auto & tAttr = tSchema.GetAttr(i);
const auto * pField = tSchema.GetField ( tAttr.m_sName.cstr() );
if ( pField && tAttr.m_eAttrType==SPH_ATTR_STRING )
dExcludeAttrs.Add(&tAttr);
}
dExcludeAttrs.Uniq();
const CSphColumnInfo * pId = tSchema.GetAttr("id");
assert(pId);
sRes << FormatCreateTableAttr ( *pId, pIndex, iNumColumnar, bQuote );
for ( int i = 0; i < tSchema.GetFieldsCount(); i++ )
{
sRes << ",\n";
sRes << FormatCreateTableField ( tSchema.GetField(i), pIndex, tSchema, iNumColumnar, bQuote );
}
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( sphIsInternalAttr ( tAttr.m_sName ) || tAttr.m_eAttrType==SPH_ATTR_TOKENCOUNT || &tAttr==pId )
continue;
if ( dExcludeAttrs.BinarySearch(&tAttr) )
continue;
sRes << ",\n";
sRes << FormatCreateTableAttr ( tAttr, pIndex, iNumColumnar, bQuote );
}
sRes << "\n)";
StringBuilder_c tBuf;
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder;
if ( g_fnCreateFilenameBuilder )
pFilenameBuilder = g_fnCreateFilenameBuilder ( pIndex->GetName() );
DumpCreateTable ( tBuf, *pIndex, pFilenameBuilder.get() );
if ( tBuf.GetLength() )
sRes << " " << tBuf.cstr();
CSphString sResult = sRes.cstr();
return sResult;
}
const char * FileAccessName ( FileAccess_e eValue )
{
switch ( eValue )
{
case FileAccess_e::FILE : return "file";
case FileAccess_e::MMAP : return "mmap";
case FileAccess_e::MMAP_PREREAD : return "mmap_preread";
case FileAccess_e::MLOCK : return "mlock";
case FileAccess_e::UNKNOWN : return "unknown";
default:
assert ( 0 && "Not all values of FileAccess_e named");
return "";
}
}
FileAccess_e ParseFileAccess ( CSphString sVal )
{
if ( sVal=="file" ) return FileAccess_e::FILE;
if ( sVal=="mmap" ) return FileAccess_e::MMAP;
if ( sVal=="mmap_preread" ) return FileAccess_e::MMAP_PREREAD;
if ( sVal=="mlock" ) return FileAccess_e::MLOCK;
return FileAccess_e::UNKNOWN;
}
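/// parse the expand_keywords option: any non-zero number enables everything, while "exact" and "star" flags may be combined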
int ParseKeywordExpansion ( const char * sValue )
{
if ( !sValue || *sValue=='\0' )
return KWE_DISABLED;
int iOpt = KWE_DISABLED;
while ( sValue && *sValue )
{
if ( !sphIsAlpha ( *sValue ) )
{
sValue++;
continue;
}
if ( *sValue>='0' && *sValue<='9' )
{
int iVal = atoi ( sValue );
if ( iVal!=0 )
iOpt = KWE_ENABLED;
break;
}
if ( sphStrMatchStatic ( "exact", sValue ) )
{
iOpt |= KWE_EXACT;
sValue += 5;
} else if ( sphStrMatchStatic ( "star", sValue ) )
{
iOpt |= KWE_STAR;
sValue += 4;
} else
{
sValue++;
}
}
return iOpt;
}
const char * GetMutableName ( MutableName_e eName )
{
switch ( eName )
{
case MutableName_e::EXPAND_KEYWORDS: return "expand_keywords";
case MutableName_e::RT_MEM_LIMIT: return "rt_mem_limit";
case MutableName_e::PREOPEN: return "preopen";
case MutableName_e::ACCESS_PLAIN_ATTRS: return "access_plain_attrs";
case MutableName_e::ACCESS_BLOB_ATTRS: return "access_blob_attrs";
case MutableName_e::ACCESS_DOCLISTS: return "access_doclists";
case MutableName_e::ACCESS_HITLISTS: return "access_hitlists";
case MutableName_e::ACCESS_DICT: return "access_dict";
case MutableName_e::READ_BUFFER_DOCS: return "read_buffer_docs";
case MutableName_e::READ_BUFFER_HITS: return "read_buffer_hits";
case MutableName_e::OPTIMIZE_CUTOFF: return "optimize_cutoff";
case MutableName_e::GLOBAL_IDF: return "global_idf";
default: assert ( 0 && "Invalid mutable option" ); return "";
}
}
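/// parse and validate a file_access value; list accesses (doclists/hitlists) can't use mmap_preread, the others can't use plain file reads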
static bool GetFileAccess ( const CSphString & sVal, const char * sKey, bool bList, FileAccess_e & eRes )
{
// should use the original value as the default due to deprecated options
if ( sVal.IsEmpty() )
return false;
FileAccess_e eParsed = ParseFileAccess ( sVal.cstr() );
if ( eParsed==FileAccess_e::UNKNOWN )
{
sphWarning( "%s unknown value %s, use default %s", sKey, sVal.cstr(), FileAccessName( eRes ) );
return false;
}
// but then the check might reset an invalid value to the real default
if ( ( bList && eParsed==FileAccess_e::MMAP_PREREAD) ||
( !bList && eParsed==FileAccess_e::FILE) )
{
sphWarning( "%s invalid value %s, use default %s", sKey, FileAccessName ( eParsed ), FileAccessName ( eRes ));
return false;
}
eRes = eParsed;
return true;
}
FileAccess_e GetFileAccess ( const CSphConfigSection & hIndex, const char * sKey, bool bList, FileAccess_e eDefault )
{
FileAccess_e eRes = eDefault;
if ( !GetFileAccess ( hIndex.GetStr ( sKey ), sKey, bList, eRes ) )
return eDefault;
return eRes;
}
static void GetFileAccess ( const JsonObj_c & tSetting, MutableName_e eName, bool bList, FileAccess_e & eRes, CSphBitvec & dLoaded )
{
const char * sName = GetMutableName ( eName );
CSphString sError;
JsonObj_c tVal = tSetting.GetStrItem ( sName, sError, true );
if ( !tVal )
{
if ( !sError.IsEmpty() )
sphWarning ( "%s", sError.cstr() );
return;
}
if ( !GetFileAccess ( tVal.StrVal(), sName, bList, eRes ) )
return;
dLoaded.BitSet ( (int)eName );
}
static void GetFileAccess ( const CSphConfigSection & hIndex, MutableName_e eName, bool bList, FileAccess_e & eRes, CSphBitvec & dLoaded )
{
const char * sName = GetMutableName ( eName );
if ( !GetFileAccess ( hIndex.GetStr ( sName ), sName, bList, eRes ) )
return;
dLoaded.BitSet ( (int)eName );
}
static const int g_iOptimizeCutoff = 1;
MutableIndexSettings_c::MutableIndexSettings_c()
: m_iExpandKeywords { KWE_DISABLED }
, m_iMemLimit { DEFAULT_RT_MEM_LIMIT }
, m_iOptimizeCutoff ( g_iOptimizeCutoff )
, m_dLoaded ( (int)MutableName_e::TOTAL )
{
#if !_WIN32
m_bPreopen = true;
#else
m_bPreopen = false;
#endif
}
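/// clamp rt_mem_limit to a sane minimum: hard floor of 128K, warning below 8M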
static int64_t GetMemLimit ( int64_t iMemLimit, StrVec_t * pWarnings )
{
if ( iMemLimit<128 * 1024 )
{
if ( pWarnings )
pWarnings->Add ( "rt_mem_limit extremely low, using 128K instead" );
else
sphWarning ( "rt_mem_limit extremely low, using 128K instead" );
iMemLimit = 128 * 1024;
} else if ( iMemLimit<8 * 1024 * 1024 )
{
if ( pWarnings )
pWarnings->Add ( "rt_mem_limit very low (under 8 MB)" );
else
sphWarning ( "rt_mem_limit very low (under 8 MB)" );
}
return iMemLimit;
}
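/// load per-table mutable settings from the JSON sidecar file; a missing or empty file is not an error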
bool MutableIndexSettings_c::Load ( const char * sFileName, const char * sIndexName )
{
CSphString sError;
CSphAutofile tReader;
int iFD = tReader.Open ( sFileName, SPH_O_READ, sError );
if ( iFD<0 ) // the mutable settings file is optional - no need to fail
return true;
int64_t iSize = tReader.GetSize();
if ( !iSize )
return true;
CSphFixedVector<BYTE> dBuf ( iSize+1 );
if ( !tReader.Read ( dBuf.Begin(), iSize, sError ) )
{
sphWarning ( "table %s, error: %s", sIndexName, sError.cstr() );
return false;
}
dBuf[iSize] = '\0';
JsonObj_c tParser ( (const char *)dBuf.Begin() );
if ( !tParser )
return false;
// read values
JsonObj_c tExpand = tParser.GetStrItem ( "expand_keywords", sError, true );
if ( tExpand )
{
m_iExpandKeywords = ParseKeywordExpansion ( tExpand.StrVal().cstr() );
m_dLoaded.BitSet ( (int)MutableName_e::EXPAND_KEYWORDS );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
JsonObj_c tMemLimit = tParser.GetIntItem ( "rt_mem_limit", sError, true );
if ( tMemLimit )
{
m_iMemLimit = GetMemLimit ( tMemLimit.IntVal(), nullptr );
m_dLoaded.BitSet ( (int)MutableName_e::RT_MEM_LIMIT );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
JsonObj_c tPreopen = tParser.GetBoolItem ( "preopen", sError, true );
if ( tPreopen )
{
m_bPreopen = tPreopen.BoolVal() || MutableIndexSettings_c::GetDefaults().m_bPreopen;
m_dLoaded.BitSet ( (int)MutableName_e::PREOPEN );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
GetFileAccess( tParser, MutableName_e::ACCESS_PLAIN_ATTRS, false, m_tFileAccess.m_eAttr, m_dLoaded );
GetFileAccess( tParser, MutableName_e::ACCESS_BLOB_ATTRS, false, m_tFileAccess.m_eBlob, m_dLoaded );
GetFileAccess( tParser, MutableName_e::ACCESS_DOCLISTS, true, m_tFileAccess.m_eDoclist, m_dLoaded );
GetFileAccess( tParser, MutableName_e::ACCESS_HITLISTS, true, m_tFileAccess.m_eHitlist, m_dLoaded );
GetFileAccess( tParser, MutableName_e::ACCESS_DICT, false, m_tFileAccess.m_eDict, m_dLoaded );
JsonObj_c tReadBuffer = tParser.GetIntItem ( "read_buffer_docs", sError, true );
if ( tReadBuffer )
{
m_tFileAccess.m_iReadBufferDocList = GetReadBuffer ( tReadBuffer.IntVal() );
m_dLoaded.BitSet ( (int)MutableName_e::READ_BUFFER_DOCS );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
tReadBuffer = tParser.GetIntItem ( "read_buffer_hits", sError, true );
if ( tReadBuffer )
{
m_tFileAccess.m_iReadBufferHitList = GetReadBuffer ( tReadBuffer.IntVal() );
m_dLoaded.BitSet ( (int)MutableName_e::READ_BUFFER_HITS );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
JsonObj_c tOptimizeCutoff = tParser.GetIntItem ( "optimize_cutoff", sError, true );
if ( tOptimizeCutoff )
{
m_iOptimizeCutoff = tOptimizeCutoff.IntVal();
m_iOptimizeCutoff = Max ( m_iOptimizeCutoff, 1 );
m_dLoaded.BitSet ( (int)MutableName_e::OPTIMIZE_CUTOFF );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
JsonObj_c tGlobalIdf = tParser.GetStrItem ( "global_idf", sError, true );
if ( tGlobalIdf )
{
m_sGlobalIDFPath = tGlobalIdf.StrVal();
m_dLoaded.BitSet ( (int)MutableName_e::GLOBAL_IDF );
} else if ( !sError.IsEmpty() )
{
sphWarning ( "table %s: %s", sIndexName, sError.cstr() );
sError = "";
}
m_bNeedSave = true;
return true;
}
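/// load mutable settings from a config section, honoring the deprecated mlock/ondisk_attrs options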
void MutableIndexSettings_c::Load ( const CSphConfigSection & hIndex, bool bNeedSave, StrVec_t * pWarnings )
{
m_bNeedSave |= bNeedSave;
if ( hIndex.Exists ( "expand_keywords" ) )
{
m_iExpandKeywords = ParseKeywordExpansion ( hIndex.GetStr( "expand_keywords" ).cstr() );
m_dLoaded.BitSet ( (int)MutableName_e::EXPAND_KEYWORDS );
}
// RAM chunk size
if ( hIndex.Exists ( "rt_mem_limit" ) )
{
m_iMemLimit = GetMemLimit ( hIndex.GetSize64 ( "rt_mem_limit", DEFAULT_RT_MEM_LIMIT ), pWarnings );
m_dLoaded.BitSet ( (int)MutableName_e::RT_MEM_LIMIT );
}
if ( hIndex.Exists ( "preopen" ) )
{
m_bPreopen = hIndex.GetBool ( "preopen", false ) || MutableIndexSettings_c::GetDefaults().m_bPreopen;
m_dLoaded.BitSet ( (int)MutableName_e::PREOPEN );
}
// DEPRECATED - remove these 2 options
if ( hIndex.GetBool ( "mlock", false ) )
{
m_tFileAccess.m_eAttr = FileAccess_e::MLOCK;
m_tFileAccess.m_eBlob = FileAccess_e::MLOCK;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_PLAIN_ATTRS );
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_BLOB_ATTRS );
}
if ( hIndex.Exists ( "ondisk_attrs" ) )
{
bool bOnDiskAttrs = hIndex.GetBool ( "ondisk_attrs", false );
bool bOnDiskPools = ( hIndex.GetStr ( "ondisk_attrs" )=="pool" );
if ( bOnDiskAttrs || bOnDiskPools )
{
m_tFileAccess.m_eAttr = FileAccess_e::MMAP;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_PLAIN_ATTRS );
}
if ( bOnDiskPools )
{
m_tFileAccess.m_eBlob = FileAccess_e::MMAP;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_BLOB_ATTRS );
}
}
// need to keep values from deprecated options for some time - use them as parse-time defaults for now
GetFileAccess( hIndex, MutableName_e::ACCESS_PLAIN_ATTRS, false, m_tFileAccess.m_eAttr, m_dLoaded );
GetFileAccess( hIndex, MutableName_e::ACCESS_BLOB_ATTRS, false, m_tFileAccess.m_eBlob, m_dLoaded );
GetFileAccess( hIndex, MutableName_e::ACCESS_DOCLISTS, true, m_tFileAccess.m_eDoclist, m_dLoaded );
GetFileAccess( hIndex, MutableName_e::ACCESS_HITLISTS, true, m_tFileAccess.m_eHitlist, m_dLoaded );
GetFileAccess( hIndex, MutableName_e::ACCESS_DICT, false, m_tFileAccess.m_eDict, m_dLoaded );
if ( hIndex.Exists ( "read_buffer_docs" ) )
{
m_tFileAccess.m_iReadBufferDocList = GetReadBuffer ( hIndex.GetSize ( "read_buffer_docs", m_tFileAccess.m_iReadBufferDocList ) );
m_dLoaded.BitSet ( (int)MutableName_e::READ_BUFFER_DOCS );
}
if ( hIndex.Exists ( "read_buffer_hits" ) )
{
m_tFileAccess.m_iReadBufferHitList = GetReadBuffer ( hIndex.GetSize ( "read_buffer_hits", m_tFileAccess.m_iReadBufferHitList ) );
m_dLoaded.BitSet ( (int)MutableName_e::READ_BUFFER_HITS );
}
if ( hIndex.Exists ( "optimize_cutoff" ) )
{
m_iOptimizeCutoff = hIndex.GetInt ( "optimize_cutoff", g_iOptimizeCutoff );
m_iOptimizeCutoff = Max ( m_iOptimizeCutoff, 1 );
m_dLoaded.BitSet ( (int)MutableName_e::OPTIMIZE_CUTOFF );
}
if ( hIndex.Exists ( "global_idf" ) )
{
m_sGlobalIDFPath = hIndex.GetStr ( "global_idf" );
m_dLoaded.BitSet ( (int)MutableName_e::GLOBAL_IDF );
}
}
static void AddStr ( const CSphBitvec & dLoaded, MutableName_e eName, JsonObj_c & tRoot, const char * sVal )
{
if ( !dLoaded.BitGet ( (int)eName ) )
return;
tRoot.AddStr ( GetMutableName ( eName ), sVal );
}
static void AddInt ( const CSphBitvec & dLoaded, MutableName_e eName, JsonObj_c & tRoot, int64_t iVal )
{
if ( !dLoaded.BitGet ( (int)eName ) )
return;
tRoot.AddInt ( GetMutableName ( eName ), iVal );
}
static const char * GetExpandKwName ( int iExpandKeywords )
{
if ( ( iExpandKeywords & KWE_ENABLED )==KWE_ENABLED )
return "1";
else if ( ( iExpandKeywords & KWE_EXACT )==KWE_EXACT )
return "exact";
else if ( ( iExpandKeywords & KWE_STAR )==KWE_STAR )
return "star";
else
return "0";
}
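/// serialize the explicitly loaded settings into JSON; returns false when there is nothing to save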
bool MutableIndexSettings_c::Save ( CSphString & sBuf ) const
{
if ( !m_bNeedSave )
return false;
JsonObj_c tRoot;
if ( m_dLoaded.BitGet ( (int)MutableName_e::EXPAND_KEYWORDS ) )
tRoot.AddStr ( "expand_keywords", GetExpandKwName ( m_iExpandKeywords ) );
AddInt ( m_dLoaded, MutableName_e::RT_MEM_LIMIT, tRoot, m_iMemLimit );
if ( m_dLoaded.BitGet ( (int)MutableName_e::PREOPEN ) )
tRoot.AddBool ( "preopen", m_bPreopen );
AddStr ( m_dLoaded, MutableName_e::ACCESS_PLAIN_ATTRS, tRoot, FileAccessName ( m_tFileAccess.m_eAttr ) );
AddStr ( m_dLoaded, MutableName_e::ACCESS_BLOB_ATTRS, tRoot, FileAccessName ( m_tFileAccess.m_eBlob ) );
AddStr ( m_dLoaded, MutableName_e::ACCESS_DOCLISTS, tRoot, FileAccessName ( m_tFileAccess.m_eDoclist ) );
AddStr ( m_dLoaded, MutableName_e::ACCESS_HITLISTS, tRoot, FileAccessName ( m_tFileAccess.m_eHitlist ) );
AddStr ( m_dLoaded, MutableName_e::ACCESS_DICT, tRoot, FileAccessName ( m_tFileAccess.m_eDict ) );
AddInt ( m_dLoaded, MutableName_e::READ_BUFFER_DOCS, tRoot, m_tFileAccess.m_iReadBufferDocList );
AddInt ( m_dLoaded, MutableName_e::READ_BUFFER_HITS, tRoot, m_tFileAccess.m_iReadBufferHitList );
AddInt ( m_dLoaded, MutableName_e::OPTIMIZE_CUTOFF, tRoot, m_iOptimizeCutoff );
AddStr ( m_dLoaded, MutableName_e::GLOBAL_IDF, tRoot, m_sGlobalIDFPath.cstr() );
sBuf = tRoot.AsString ( true );
return true;
}
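/// overlay every explicitly loaded value from tOther on top of this instance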
void MutableIndexSettings_c::Combine ( const MutableIndexSettings_c & tOther )
{
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::EXPAND_KEYWORDS ) )
{
m_iExpandKeywords = tOther.m_iExpandKeywords;
m_dLoaded.BitSet ( (int)MutableName_e::EXPAND_KEYWORDS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::RT_MEM_LIMIT ) )
{
m_iMemLimit = tOther.m_iMemLimit;
m_dLoaded.BitSet ( (int)MutableName_e::RT_MEM_LIMIT );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::PREOPEN ) )
{
m_bPreopen = tOther.m_bPreopen;
m_dLoaded.BitSet ( (int)MutableName_e::PREOPEN );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::ACCESS_PLAIN_ATTRS ) )
{
m_tFileAccess.m_eAttr = tOther.m_tFileAccess.m_eAttr;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_PLAIN_ATTRS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::ACCESS_BLOB_ATTRS ) )
{
m_tFileAccess.m_eBlob = tOther.m_tFileAccess.m_eBlob;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_BLOB_ATTRS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::ACCESS_DOCLISTS ) )
{
m_tFileAccess.m_eDoclist = tOther.m_tFileAccess.m_eDoclist;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_DOCLISTS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::ACCESS_HITLISTS ) )
{
m_tFileAccess.m_eHitlist = tOther.m_tFileAccess.m_eHitlist;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_HITLISTS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::ACCESS_DICT ) )
{
m_tFileAccess.m_eDict = tOther.m_tFileAccess.m_eDict;
m_dLoaded.BitSet ( (int)MutableName_e::ACCESS_DICT );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::READ_BUFFER_DOCS ) )
{
m_tFileAccess.m_iReadBufferDocList = tOther.m_tFileAccess.m_iReadBufferDocList;
m_dLoaded.BitSet ( (int)MutableName_e::READ_BUFFER_DOCS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::READ_BUFFER_HITS ) )
{
m_tFileAccess.m_iReadBufferHitList = tOther.m_tFileAccess.m_iReadBufferHitList;
m_dLoaded.BitSet ( (int)MutableName_e::READ_BUFFER_HITS );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::OPTIMIZE_CUTOFF ) )
{
m_iOptimizeCutoff = tOther.m_iOptimizeCutoff;
m_dLoaded.BitSet ( (int)MutableName_e::OPTIMIZE_CUTOFF );
}
if ( tOther.m_dLoaded.BitGet ( (int)MutableName_e::GLOBAL_IDF ) )
{
m_sGlobalIDFPath = tOther.m_sGlobalIDFPath;
m_dLoaded.BitSet ( (int)MutableName_e::GLOBAL_IDF );
}
}
MutableIndexSettings_c & MutableIndexSettings_c::GetDefaults ()
{
static MutableIndexSettings_c tMutableDefaults;
return tMutableDefaults;
}
static bool FormatCond ( bool bNeedSave, const CSphBitvec & dLoaded, MutableName_e eName, bool bNotEq )
{
return ( ( bNeedSave && dLoaded.BitGet ( (int)eName ) ) || ( !bNeedSave && bNotEq ) );
}
void MutableIndexSettings_c::Format ( SettingsFormatter_c & tOut, FilenameBuilder_i * ) const
{
const MutableIndexSettings_c & tDefaults = GetDefaults ();
tOut.Add ( GetMutableName ( MutableName_e::EXPAND_KEYWORDS ), GetExpandKwName ( m_iExpandKeywords ),
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::EXPAND_KEYWORDS, m_iExpandKeywords!=tDefaults.m_iExpandKeywords ) );
tOut.Add ( GetMutableName ( MutableName_e::RT_MEM_LIMIT ), m_iMemLimit,
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::RT_MEM_LIMIT, m_iMemLimit!=tDefaults.m_iMemLimit ) );
tOut.Add ( GetMutableName ( MutableName_e::PREOPEN ), m_bPreopen,
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::PREOPEN, m_bPreopen!=tDefaults.m_bPreopen ) );
tOut.Add ( GetMutableName ( MutableName_e::ACCESS_PLAIN_ATTRS ), FileAccessName ( m_tFileAccess.m_eAttr ),
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::ACCESS_PLAIN_ATTRS, m_tFileAccess.m_eAttr!=tDefaults.m_tFileAccess.m_eAttr ) );
tOut.Add ( GetMutableName ( MutableName_e::ACCESS_BLOB_ATTRS ), FileAccessName ( m_tFileAccess.m_eBlob ),
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::ACCESS_BLOB_ATTRS, m_tFileAccess.m_eBlob!=tDefaults.m_tFileAccess.m_eBlob ) );
tOut.Add ( GetMutableName ( MutableName_e::ACCESS_DOCLISTS ), FileAccessName ( m_tFileAccess.m_eDoclist ),
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::ACCESS_DOCLISTS, m_tFileAccess.m_eDoclist!=tDefaults.m_tFileAccess.m_eDoclist ) );
tOut.Add ( GetMutableName ( MutableName_e::ACCESS_HITLISTS ), FileAccessName ( m_tFileAccess.m_eHitlist ),
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::ACCESS_HITLISTS, m_tFileAccess.m_eHitlist!=tDefaults.m_tFileAccess.m_eHitlist ) );
tOut.Add ( GetMutableName ( MutableName_e::ACCESS_DICT ), FileAccessName ( m_tFileAccess.m_eDict ),
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::ACCESS_DICT, m_tFileAccess.m_eDict!=tDefaults.m_tFileAccess.m_eDict ) );
tOut.Add ( GetMutableName ( MutableName_e::READ_BUFFER_DOCS ), m_tFileAccess.m_iReadBufferDocList,
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::READ_BUFFER_DOCS, m_tFileAccess.m_iReadBufferDocList!=tDefaults.m_tFileAccess.m_iReadBufferDocList ) );
tOut.Add ( GetMutableName ( MutableName_e::READ_BUFFER_HITS ), m_tFileAccess.m_iReadBufferHitList,
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::READ_BUFFER_HITS, m_tFileAccess.m_iReadBufferHitList!=tDefaults.m_tFileAccess.m_iReadBufferHitList ) );
tOut.Add ( GetMutableName ( MutableName_e::OPTIMIZE_CUTOFF ), m_iOptimizeCutoff,
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::OPTIMIZE_CUTOFF, HasSettings() && m_dLoaded.BitGet ( (int)MutableName_e::OPTIMIZE_CUTOFF ) ) );
tOut.Add ( GetMutableName ( MutableName_e::GLOBAL_IDF ), m_sGlobalIDFPath,
FormatCond ( m_bNeedSave, m_dLoaded, MutableName_e::GLOBAL_IDF, HasSettings() && m_dLoaded.BitGet ( (int)MutableName_e::GLOBAL_IDF ) ) );
}
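/// load index settings from the JSON (bson) header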
void LoadIndexSettingsJson ( bson::Bson_c tNode, CSphIndexSettings & tSettings )
{
using namespace bson;
tSettings.SetMinPrefixLen ( (int)Int ( tNode.ChildByName ( "min_prefix_len" ) ) );
tSettings.m_iMinInfixLen = (int)Int ( tNode.ChildByName ( "min_infix_len" ) );
tSettings.m_iMaxSubstringLen = (int)Int ( tNode.ChildByName ( "max_substring_len" ) );
tSettings.m_bHtmlStrip = Bool ( tNode.ChildByName ( "strip_html" ) );
tSettings.m_sHtmlIndexAttrs = String ( tNode.ChildByName ( "html_index_attrs" ) );
tSettings.m_sHtmlRemoveElements = String ( tNode.ChildByName ( "html_remove_elements" ) );
tSettings.m_bIndexExactWords = Bool ( tNode.ChildByName ( "index_exact_words" ) );
tSettings.m_eHitless = (ESphHitless)Int ( tNode.ChildByName ( "hitless" ), SPH_HITLESS_NONE );
tSettings.m_eHitFormat = (ESphHitFormat)Int ( tNode.ChildByName ( "hit_format" ), SPH_HIT_FORMAT_PLAIN );
tSettings.m_bIndexSP = Bool ( tNode.ChildByName ( "index_sp" ) );
tSettings.m_sZones = String ( tNode.ChildByName ( "zones" ) );
tSettings.m_iBoundaryStep = (int)Int ( tNode.ChildByName ( "boundary_step" ) );
tSettings.m_iStopwordStep = (int)Int ( tNode.ChildByName ( "stopword_step" ), 1 );
tSettings.m_iOvershortStep = (int)Int ( tNode.ChildByName ( "overshort_step" ), 1 );
tSettings.m_iEmbeddedLimit = (int)Int ( tNode.ChildByName ( "embedded_limit" ) );
tSettings.m_eBigramIndex = (ESphBigram)Int ( tNode.ChildByName ( "bigram_index" ), SPH_BIGRAM_NONE );
tSettings.m_sBigramWords = String ( tNode.ChildByName ( "bigram_words" ) );
tSettings.m_bIndexFieldLens = Bool ( tNode.ChildByName ( "index_field_lens" ) );
tSettings.m_ePreprocessor = (Preprocessor_e)Int ( tNode.ChildByName ( "icu" ), (DWORD)Preprocessor_e::NONE );
tSettings.m_sIndexTokenFilter = String ( tNode.ChildByName ( "index_token_filter" ) );
tSettings.m_tBlobUpdateSpace = Int ( tNode.ChildByName ( "blob_update_space" ) );
tSettings.m_iSkiplistBlockSize = (int)Int ( tNode.ChildByName ( "skiplist_block_size" ), 32 );
tSettings.m_sHitlessFiles = String ( tNode.ChildByName ( "hitless_files" ) );
tSettings.m_eEngine = (AttrEngine_e)Int ( tNode.ChildByName ( "engine" ), (DWORD)AttrEngine_e::DEFAULT );
tSettings.m_eDefaultEngine = (AttrEngine_e)Int ( tNode.ChildByName ( "engine_default" ), (DWORD)AttrEngine_e::ROWWISE );
tSettings.m_eJiebaMode = (JiebaMode_e)Int ( tNode.ChildByName ( "jieba_mode" ), (DWORD)JiebaMode_e::DEFAULT );
tSettings.m_bJiebaHMM = Bool ( tNode.ChildByName ( "jieba_hmm" ), true );
tSettings.m_sJiebaUserDictPath = String ( tNode.ChildByName ( "jieba_user_dict_path" ) );
}
void LoadIndexSettings ( CSphIndexSettings & tSettings, CSphReader & tReader, DWORD uVersion )
{
tSettings.SetMinPrefixLen ( tReader.GetDword() );
tSettings.m_iMinInfixLen = tReader.GetDword ();
tSettings.m_iMaxSubstringLen = tReader.GetDword();
tSettings.m_bHtmlStrip = !!tReader.GetByte ();
tSettings.m_sHtmlIndexAttrs = tReader.GetString ();
tSettings.m_sHtmlRemoveElements = tReader.GetString ();
tSettings.m_bIndexExactWords = !!tReader.GetByte ();
tSettings.m_eHitless = (ESphHitless)tReader.GetDword();
tSettings.m_eHitFormat = (ESphHitFormat)tReader.GetDword();
tSettings.m_bIndexSP = !!tReader.GetByte();
tSettings.m_sZones = tReader.GetString();
tSettings.m_iBoundaryStep = (int)tReader.GetDword();
tSettings.m_iStopwordStep = (int)tReader.GetDword();
tSettings.m_iOvershortStep = (int)tReader.GetDword();
tSettings.m_iEmbeddedLimit = (int)tReader.GetDword();
tSettings.m_eBigramIndex = (ESphBigram)tReader.GetByte();
tSettings.m_sBigramWords = tReader.GetString();
tSettings.m_bIndexFieldLens = ( tReader.GetByte()!=0 );
tSettings.m_ePreprocessor = tReader.GetByte()==1 ? Preprocessor_e::ICU : Preprocessor_e::NONE;
tReader.GetString(); // was: RLP context
tSettings.m_sIndexTokenFilter = tReader.GetString();
tSettings.m_tBlobUpdateSpace = tReader.GetOffset();
if ( uVersion<56 )
tSettings.m_iSkiplistBlockSize = 128;
else
tSettings.m_iSkiplistBlockSize = (int)tReader.GetDword();
if ( uVersion>=60 )
tSettings.m_sHitlessFiles = tReader.GetString();
if ( uVersion>=63 )
tSettings.m_eEngine = (AttrEngine_e)tReader.GetDword();
if ( uVersion>=67 )
{
tSettings.m_eJiebaMode = (JiebaMode_e)tReader.GetDword();
tSettings.m_bJiebaHMM = !!tReader.GetByte();
}
}
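/// binary serialization of index settings; MUST stay in sync with LoadIndexSettings() above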
void SaveIndexSettings ( Writer_i & tWriter, const CSphIndexSettings & tSettings )
{
tWriter.PutDword ( tSettings.RawMinPrefixLen() );
tWriter.PutDword ( tSettings.m_iMinInfixLen );
tWriter.PutDword ( tSettings.m_iMaxSubstringLen );
tWriter.PutByte ( tSettings.m_bHtmlStrip ? 1 : 0 );
tWriter.PutString ( tSettings.m_sHtmlIndexAttrs.cstr () );
tWriter.PutString ( tSettings.m_sHtmlRemoveElements.cstr () );
tWriter.PutByte ( tSettings.m_bIndexExactWords ? 1 : 0 );
tWriter.PutDword ( tSettings.m_eHitless );
tWriter.PutDword ( tSettings.m_eHitFormat );
tWriter.PutByte ( tSettings.m_bIndexSP );
tWriter.PutString ( tSettings.m_sZones );
tWriter.PutDword ( tSettings.m_iBoundaryStep );
tWriter.PutDword ( tSettings.m_iStopwordStep );
tWriter.PutDword ( tSettings.m_iOvershortStep );
tWriter.PutDword ( tSettings.m_iEmbeddedLimit );
tWriter.PutByte ( tSettings.m_eBigramIndex );
tWriter.PutString ( tSettings.m_sBigramWords );
tWriter.PutByte ( tSettings.m_bIndexFieldLens );
tWriter.PutByte ( tSettings.m_ePreprocessor==Preprocessor_e::ICU ? 1 : 0 );
tWriter.PutString(""); // was: RLP context
tWriter.PutString ( tSettings.m_sIndexTokenFilter );
tWriter.PutOffset ( tSettings.m_tBlobUpdateSpace );
tWriter.PutDword ( tSettings.m_iSkiplistBlockSize );
tWriter.PutString ( tSettings.m_sHitlessFiles );
tWriter.PutDword ( (DWORD)tSettings.m_eEngine );
tWriter.PutDword ( (DWORD)tSettings.m_eJiebaMode );
tWriter.PutByte ( tSettings.m_bJiebaHMM ? 1 : 0 );
tWriter.PutString ( tSettings.m_sJiebaUserDictPath );
}
void operator << ( JsonEscapedBuilder & tOut, const CSphIndexSettings & tSettings )
{
auto _ = tOut.ObjectW();
tOut.NamedValNonDefault ( "min_prefix_len", tSettings.RawMinPrefixLen() );
tOut.NamedValNonDefault ( "min_infix_len", tSettings.m_iMinInfixLen );
tOut.NamedValNonDefault ( "max_substring_len", tSettings.m_iMaxSubstringLen );
tOut.NamedValNonDefault ( "strip_html", tSettings.m_bHtmlStrip, false );
tOut.NamedStringNonEmpty ( "html_index_attrs", tSettings.m_sHtmlIndexAttrs );
tOut.NamedStringNonEmpty ( "html_remove_elements", tSettings.m_sHtmlRemoveElements );
tOut.NamedValNonDefault ( "index_exact_words", tSettings.m_bIndexExactWords, false );
tOut.NamedValNonDefault ( "hitless", tSettings.m_eHitless, SPH_HITLESS_NONE );
tOut.NamedValNonDefault ( "hit_format", tSettings.m_eHitFormat, SPH_HIT_FORMAT_PLAIN );
tOut.NamedValNonDefault ( "index_sp", tSettings.m_bIndexSP, false );
tOut.NamedStringNonEmpty ( "zones", tSettings.m_sZones );
tOut.NamedValNonDefault ( "boundary_step", tSettings.m_iBoundaryStep );
tOut.NamedValNonDefault ( "stopword_step", tSettings.m_iStopwordStep, 1 );
tOut.NamedValNonDefault ( "overshort_step", tSettings.m_iOvershortStep, 1 );
tOut.NamedValNonDefault ( "embedded_limit", tSettings.m_iEmbeddedLimit );
tOut.NamedValNonDefault ( "bigram_index", tSettings.m_eBigramIndex, SPH_BIGRAM_NONE );
tOut.NamedStringNonEmpty ( "bigram_words", tSettings.m_sBigramWords );
tOut.NamedValNonDefault ( "index_field_lens", tSettings.m_bIndexFieldLens, false );
tOut.NamedValNonDefault ( "icu", (DWORD)tSettings.m_ePreprocessor, (DWORD)Preprocessor_e::NONE );
tOut.NamedStringNonEmpty ( "index_token_filter", tSettings.m_sIndexTokenFilter );
tOut.NamedValNonDefault ( "blob_update_space", tSettings.m_tBlobUpdateSpace );
tOut.NamedValNonDefault ( "skiplist_block_size", tSettings.m_iSkiplistBlockSize, 32 );
tOut.NamedStringNonEmpty ( "hitless_files", tSettings.m_sHitlessFiles );
tOut.NamedValNonDefault ( "engine", (DWORD)tSettings.m_eEngine, (DWORD)AttrEngine_e::DEFAULT );
tOut.NamedValNonDefault ( "engine_default", (DWORD)tSettings.m_eDefaultEngine, (DWORD)AttrEngine_e::ROWWISE );
tOut.NamedValNonDefault ( "jieba_mode", (DWORD)tSettings.m_eJiebaMode, (DWORD)JiebaMode_e::DEFAULT );
tOut.NamedValNonDefault ( "jieba_hmm", tSettings.m_bJiebaHMM, true );
tOut.NamedStringNonEmpty ( "jieba_user_dict_path", tSettings.m_sJiebaUserDictPath );
}
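/// persist mutable settings atomically: write a ".new" file first, then rename it over the old one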
void SaveMutableSettings ( const MutableIndexSettings_c & tSettings, const CSphString & sSettingsFile )
{
CSphString sBuf;
if ( !tSettings.Save ( sBuf ) ) // no need to save in case settings were set from config
return;
CSphString sError;
CSphString sSettingsFileNew = SphSprintf ( "%s.new", sSettingsFile.cstr() );
CSphWriter tWriter;
if ( !tWriter.OpenFile ( sSettingsFileNew, sError ) )
sphDie ( "failed to serialize mutable settings: %s", sError.cstr() ); // !COMMIT handle this gracefully
tWriter.PutBytes ( sBuf.cstr(), sBuf.Length() );
tWriter.CloseFile();
if ( tWriter.IsError() )
{
sphWarning ( "%s", sError.cstr() );
return;
}
// rename
if ( sph::rename ( sSettingsFileNew.cstr(), sSettingsFile.cstr() ) )
sphDie ( "failed to rename mutable settings(src=%s, dst=%s, errno=%d, error=%s)", sSettingsFileNew.cstr(), sSettingsFile.cstr(), errno, strerrorm(errno) ); // !COMMIT handle this gracefully
}
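/// a per-attribute engine setting overrides the per-index one unless left at DEFAULT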
AttrEngine_e CombineEngines ( AttrEngine_e eIndexEngine, AttrEngine_e eAttrEngine )
{
AttrEngine_e eEngine = eIndexEngine;
if ( eAttrEngine!=AttrEngine_e::DEFAULT )
eEngine = eAttrEngine;
return eEngine;
}
void SetDefaultAttrEngine ( AttrEngine_e eEngine )
{
g_eAttrEngine = eEngine;
}
AttrEngine_e GetDefaultAttrEngine()
{
return g_eAttrEngine;
}
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/docs_collector.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "docs_collector.h"
#include "searchdaemon.h"
#include "indexfiles.h"
class DocsCollector_c::Impl_c
{
VecTraits_T<DocID_t> m_dFastSlice;
CSphVector<DocID_t> m_dValues;
CSphVector<BYTE> m_dCompressedDocids;
std::unique_ptr<MemoryReader_c> m_pCompressedReader;
int m_iFastIdx = 0;
DocID_t m_iLastId = -1;
bool m_bFastPath = false;
// check the short path - if we have clauses like 'id=smth', 'id in (xx,yy)' or 'id in @uservar' - we know
// the whole list of values immediately and don't have to run the heavy query here.
bool ProcessFast( const CSphQuery& tQuery )
{
if ( !tQuery.m_sQuery.IsEmpty() || !tQuery.m_dFilterTree.IsEmpty() || tQuery.m_dFilters.GetLength() != 1 )
return false;
const CSphFilterSettings* pFilter = tQuery.m_dFilters.Begin();
if ( ( pFilter->m_bHasEqualMin || pFilter->m_bHasEqualMax ) && pFilter->m_eType == SPH_FILTER_VALUES
&& ( pFilter->m_sAttrName == "@id" || pFilter->m_sAttrName == "id" ) && !pFilter->m_bExclude )
{
m_dFastSlice = pFilter->GetValues();
m_iFastIdx = 0;
m_bFastPath = true;
}
return m_bFastPath;
}
void ProcessFull ( const CSphQuery& tQuery, bool bJson, const CSphString& sIndex, const cServedIndexRefPtr_c& pDesc, CSphString* pError )
{
PubSearchHandler_c tHandler ( 1, CreateQueryParser ( bJson ), tQuery.m_eQueryType, false );
tHandler.PushIndex ( sIndex, pDesc );
tHandler.RunCollect ( tQuery, sIndex, pError, &m_dCompressedDocids );
if ( m_dCompressedDocids.IsEmpty() )
return;
m_pCompressedReader = std::make_unique<MemoryReader_c> ( m_dCompressedDocids );
m_iLastId = 0;
}
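// serve the next chunk of docids straight from the filter values (fast path)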
bool GetValuesChunkFast ( CSphVector<DocID_t>& dValues, int iValues=-1 )
{
assert ( m_bFastPath );
if ( iValues < 0 )
iValues = m_dFastSlice.GetLength() - m_iFastIdx;
else
iValues = Min ( iValues, m_dFastSlice.GetLength() - m_iFastIdx );
if ( !iValues )
return false;
for ( const auto& i : m_dFastSlice.Slice ( std::exchange ( m_iFastIdx, m_iFastIdx + iValues ), iValues ) )
dValues.Add ( i );
return true;
}
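// decode the next chunk of delta-compressed docids collected by the full query run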
bool GetValuesChunkFull ( CSphVector<DocID_t>& dValues, int iValues )
{
assert ( !m_bFastPath );
if ( !m_pCompressedReader || !m_pCompressedReader->HasData() )
return false;
while ( m_pCompressedReader->HasData() && iValues-- )
{
m_iLastId += m_pCompressedReader->UnzipOffset();
dValues.Add ( m_iLastId );
}
return true;
}
public:
Impl_c ( const CSphQuery& tQuery, bool bJson, const CSphString& sIndex, const cServedIndexRefPtr_c& pDesc, CSphString* pError )
{
if ( !ProcessFast ( tQuery ) )
ProcessFull ( tQuery, bJson, sIndex, pDesc, pError );
}
bool GetValuesChunk ( CSphVector<DocID_t>& dValues, int iValues )
{
dValues.Resize(0);
return m_bFastPath ? GetValuesChunkFast ( dValues, iValues ) : GetValuesChunkFull ( dValues, iValues );
}
// beware: the slice lives together with this class and becomes undefined once the class is destroyed.
VecTraits_T<DocID_t> GetValuesSlice ()
{
if ( m_bFastPath )
return m_dFastSlice;
if ( !m_pCompressedReader && !m_dCompressedDocids.IsEmpty() )
{
m_pCompressedReader = std::make_unique<MemoryReader_c> ( m_dCompressedDocids );
m_iLastId = 0;
m_dValues.Reset();
}
if ( m_pCompressedReader )
while ( m_pCompressedReader->HasData() )
{
m_iLastId += m_pCompressedReader->UnzipOffset();
m_dValues.Add ( m_iLastId );
}
return m_dValues;
}
};
/// public iface
DocsCollector_c::DocsCollector_c ( const CSphQuery& tQuery, bool bJson, const CSphString& sIndex, const cServedIndexRefPtr_c& pDesc, CSphString* pError )
: m_pImpl { std::make_unique<Impl_c> ( tQuery, bJson, sIndex, pDesc, pError ) }
{}
DocsCollector_c::~DocsCollector_c() = default;
DocsCollector_c::DocsCollector_c ( DocsCollector_c&& rhs ) noexcept
: m_pImpl ( std::exchange ( rhs.m_pImpl, nullptr ) )
{}
bool DocsCollector_c::GetValuesChunk ( CSphVector<DocID_t>& dValues, int iValues )
{
return m_pImpl->GetValuesChunk( dValues, iValues );
}
VecTraits_T<DocID_t> DocsCollector_c::GetValuesSlice()
{
return m_pImpl->GetValuesSlice();
}
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/exprdocstore.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exprdocstore.h"
#include "exprtraits.h"
#include "docstore.h"
//////////////////////////////////////////////////////////////////////////
template <bool POSTLIMIT>
class Expr_GetStored_T : public ISphExpr
{
public:
Expr_GetStored_T ( CSphString sField, DocstoreDataType_e eDocstoreType, ESphAttr eAttrType );
float Eval ( const CSphMatch & tMatch ) const final;
int IntEval ( const CSphMatch & tMatch ) const final;
int64_t Int64Eval ( const CSphMatch & tMatch ) const final;
bool IsDataPtrAttr() const final { return sphIsBlobAttr(m_eAttrType); }
bool IsStored() const final { return !POSTLIMIT; }
bool UsesDocstore() const { return true; }
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const;
const BYTE * StringEvalPacked ( const CSphMatch & tMatch ) const final { return GetBlobPacked(tMatch); }
void Command ( ESphExprCommand eCmd, void * pArg ) final;
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) final {}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_GetStored_T ( m_sField, m_eDocstoreType, m_eAttrType ); }
private:
CSphString m_sField;
DocstoreDataType_e m_eDocstoreType = DOCSTORE_TEXT;
ESphAttr m_eAttrType = SPH_ATTR_INTEGER;
CSphVector<int> m_dFieldIds;
DocstoreSession_c::InfoRowID_t m_tSessionRowID;
DocstoreSession_c::InfoDocID_t m_tSessionDocID;
VecTraits_T<const BYTE> GetBlob ( DocstoreDoc_t & tDoc, const CSphMatch & tMatch ) const;
const BYTE * GetBlobPacked ( const CSphMatch & tMatch ) const;
template <typename T> T ConvertBlobType ( const VecTraits_T<const BYTE> & dBlob ) const;
};
template <bool POSTLIMIT>
Expr_GetStored_T<POSTLIMIT>::Expr_GetStored_T ( CSphString sField, DocstoreDataType_e eDocstoreType, ESphAttr eAttrType )
: m_sField ( std::move(sField) )
, m_eDocstoreType ( eDocstoreType )
, m_eAttrType ( eAttrType )
{}
template <bool POSTLIMIT>
float Expr_GetStored_T<POSTLIMIT>::Eval ( const CSphMatch & tMatch ) const
{
assert ( m_eDocstoreType==DOCSTORE_ATTR );
assert ( m_eAttrType!=SPH_ATTR_STRING && m_eAttrType!=SPH_ATTR_UINT32SET && m_eAttrType!=SPH_ATTR_INT64SET && m_eAttrType!=SPH_ATTR_BIGINT );
DocstoreDoc_t tDoc;
VecTraits_T<const BYTE> tBlob = GetBlob ( tDoc, tMatch );
return tBlob.Begin() ? *(const float*)tBlob.Begin() : 0.0f;
}
template <bool POSTLIMIT>
int Expr_GetStored_T<POSTLIMIT>::IntEval ( const CSphMatch & tMatch ) const
{
assert ( m_eDocstoreType==DOCSTORE_ATTR );
DocstoreDoc_t tDoc;
return ConvertBlobType<DWORD> ( GetBlob ( tDoc, tMatch ) );
}
template <bool POSTLIMIT>
int64_t Expr_GetStored_T<POSTLIMIT>::Int64Eval ( const CSphMatch & tMatch ) const
{
assert ( m_eDocstoreType==DOCSTORE_ATTR );
DocstoreDoc_t tDoc;
switch ( m_eAttrType )
{
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET_PTR:
case SPH_ATTR_FLOAT_VECTOR_PTR:
return (int64_t)sphPackPtrAttr ( GetBlob ( tDoc, tMatch ) );
default:
return ConvertBlobType<int64_t> ( GetBlob ( tDoc, tMatch ) );
}
}
template <bool POSTLIMIT>
int Expr_GetStored_T<POSTLIMIT>::StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const
{
DocstoreDoc_t tDoc;
VecTraits_T<const BYTE> tRes = GetBlob ( tDoc, tMatch );
*ppStr = tDoc.m_dFields[0].LeakData();
return tRes.GetLength();
}
template <bool POSTLIMIT>
void Expr_GetStored_T<POSTLIMIT>::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( eCmd!=SPH_EXPR_SET_DOCSTORE_DOCID )
return;
m_dFieldIds.Resize(0);
assert(pArg);
m_tSessionDocID = *(DocstoreSession_c::InfoDocID_t*)pArg;
assert ( m_tSessionDocID.m_pDocstore );
int iFieldId = m_tSessionDocID.m_pDocstore->GetFieldId ( m_sField.cstr(), m_eDocstoreType );
if ( iFieldId!=-1 )
m_dFieldIds.Add(iFieldId);
}
template <bool POSTLIMIT>
uint64_t Expr_GetStored_T<POSTLIMIT>::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_GetStored_T");
CALC_STR_HASH(m_sField, m_sField.Length());
CALC_POD_HASHES(m_dFieldIds);
return CALC_DEP_HASHES();
}
template <bool POSTLIMIT>
VecTraits_T<const BYTE> Expr_GetStored_T<POSTLIMIT>::GetBlob ( DocstoreDoc_t & tDoc, const CSphMatch & tMatch ) const
{
if ( !m_tSessionDocID.m_pDocstore || !m_dFieldIds.GetLength() )
return { nullptr, 0 };
DocID_t tDocID = sphGetDocID ( tMatch.m_pDynamic ? tMatch.m_pDynamic : tMatch.m_pStatic );
if ( m_tSessionDocID.m_pDocstore->GetDoc ( tDoc, tDocID, &m_dFieldIds, m_tSessionDocID.m_iSessionId, false ) )
return tDoc.m_dFields[0];
return { nullptr, 0 };
}
template <bool POSTLIMIT>
const BYTE * Expr_GetStored_T<POSTLIMIT>::GetBlobPacked ( const CSphMatch & tMatch ) const
{
if ( !m_tSessionDocID.m_pDocstore || !m_dFieldIds.GetLength() )
return nullptr;
DocID_t tDocID = sphGetDocID ( tMatch.m_pDynamic ? tMatch.m_pDynamic : tMatch.m_pStatic );
DocstoreDoc_t tDoc;
if ( m_tSessionDocID.m_pDocstore->GetDoc ( tDoc, tDocID, &m_dFieldIds, m_tSessionDocID.m_iSessionId, true ) )
return tDoc.m_dFields[0].LeakData();
return nullptr;
}
template <bool POSTLIMIT>
template <typename T>
T Expr_GetStored_T<POSTLIMIT>::ConvertBlobType ( const VecTraits_T<const BYTE> & dBlob ) const
{
int64_t iValue = 0;
switch ( dBlob.GetLength() )
{
case 4: iValue = *(const DWORD*)dBlob.Begin(); break;
case 8: iValue = *(const int64_t*)dBlob.Begin(); break;
default: break;
}
return (T)iValue;
}
template <>
void Expr_GetStored_T<false>::Command ( ESphExprCommand eCmd, void * pArg )
{
switch ( eCmd )
{
case SPH_EXPR_GET_COLUMNAR_COL:
*(CSphString*)pArg = m_sField;
break;
case SPH_EXPR_SET_DOCSTORE_ROWID:
{
m_dFieldIds.Resize(0);
assert(pArg);
m_tSessionRowID = *(DocstoreSession_c::InfoRowID_t*)pArg;
assert ( m_tSessionRowID.m_pDocstore );
int iFieldId = m_tSessionRowID.m_pDocstore->GetFieldId ( m_sField.cstr(), m_eDocstoreType );
if ( iFieldId!=-1 )
m_dFieldIds.Add(iFieldId);
}
break;
default:
break;
}
}
template <>
VecTraits_T<const BYTE> Expr_GetStored_T<false>::GetBlob ( DocstoreDoc_t & tDoc, const CSphMatch & tMatch ) const
{
if ( !m_tSessionRowID.m_pDocstore || !m_dFieldIds.GetLength() )
return { nullptr, 0 };
tDoc = m_tSessionRowID.m_pDocstore->GetDoc ( tMatch.m_tRowID, &m_dFieldIds, m_tSessionRowID.m_iSessionId, false );
return tDoc.m_dFields[0];
}
template <>
const BYTE * Expr_GetStored_T<false>::GetBlobPacked ( const CSphMatch & tMatch ) const
{
if ( !m_tSessionRowID.m_pDocstore || !m_dFieldIds.GetLength() )
return nullptr;
DocstoreDoc_t tDoc = m_tSessionRowID.m_pDocstore->GetDoc ( tMatch.m_tRowID, &m_dFieldIds, m_tSessionRowID.m_iSessionId, true );
return tDoc.m_dFields[0].LeakData();
}
///////////////////////////////////////////////////////////////////////////////
ISphExpr * CreateExpr_GetStoredField ( const CSphString & sName )
{
return new Expr_GetStored_T<true> ( sName, DOCSTORE_TEXT, SPH_ATTR_STRING );
}
ISphExpr * CreateExpr_GetStoredAttr ( const CSphString & sName, ESphAttr eAttr )
{
return new Expr_GetStored_T<false> ( sName, DOCSTORE_ATTR, eAttr );
}
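// Editor's sketch (not part of the original source): intended use of the two
// factories above; the field/attribute names are hypothetical.
#if 0
// POSTLIMIT=true: docid-based docstore lookup, deferred until after LIMIT,
// driven by SPH_EXPR_SET_DOCSTORE_DOCID
ISphExpr * pField = CreateExpr_GetStoredField ( "title" );

// POSTLIMIT=false: rowid-based lookup (SPH_EXPR_SET_DOCSTORE_ROWID); this one
// also reports its source column via SPH_EXPR_GET_COLUMNAR_COL
ISphExpr * pAttr = CreateExpr_GetStoredAttr ( "price", SPH_ATTR_FLOAT );
#endif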
| 7,581 | C++ | .cpp | 191 | 37.774869 | 142 | 0.725789 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,855 | pseudosharding.cpp | manticoresoftware_manticoresearch/src/pseudosharding.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2011-2016, Andrew Aksyonoff
// Copyright (c) 2011-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "pseudosharding.h"
#include <math.h>
void DistributeThreadsOverIndexes ( IntVec_t & dThreads, const CSphVector<SplitData_t> & dSplitData, int iConcurrency )
{
dThreads.Resize ( dSplitData.GetLength() );
dThreads.Fill(1);
int64_t iTotalMetric = 0;
for ( auto & i : dSplitData )
iTotalMetric += i.m_iMetric;
// ignore indexes with thread cap==1; they won't get more than 1 thread
int iThreadsUsed = dSplitData.count_of ( []( auto & i ){ return i.m_iThreadCap==1; } );
// split the remaining threads among the indexes left (and apply the thread cap)
int iThreadsLeft = iConcurrency-iThreadsUsed;
ARRAY_FOREACH ( i, dSplitData )
{
const SplitData_t & tSD = dSplitData[i];
int & iThreads = dThreads[i];
if ( tSD.m_iThreadCap==1 )
continue;
assert ( tSD.m_iMetric>=0 );
iThreads = Max ( (int)round ( float(tSD.m_iMetric) / iTotalMetric * iThreadsLeft ), 1 );
if ( tSD.m_iThreadCap > 1 )
iThreads = Min ( iThreads, tSD.m_iThreadCap );
}
int iCappedThreads = 0;
int iNonCappedThreads = 0;
ARRAY_FOREACH ( i, dSplitData )
if ( dSplitData[i].m_iThreadCap >= 1 )
iCappedThreads += dThreads[i];
else
iNonCappedThreads += dThreads[i];
// might get negative due to rounding
iThreadsLeft = Max ( iConcurrency-iCappedThreads, 0 );
ARRAY_FOREACH ( i, dSplitData )
if ( dSplitData[i].m_iThreadCap < 1 )
dThreads[i] = Max ( (int)round ( iThreadsLeft * dThreads[i]/iNonCappedThreads ), 1 );
}
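// Editor's sketch (illustration only, not in the original file): distributing
// an 8-thread budget over three hypothetical indexes.
#if 0
static void ExampleDistribute()
{
	CSphVector<SplitData_t> dSplit ( 3 );
	dSplit[0].m_iMetric = 600; dSplit[0].m_iThreadCap = 0;	// uncapped
	dSplit[1].m_iMetric = 200; dSplit[1].m_iThreadCap = 2;	// capped at 2
	dSplit[2].m_iMetric = 100; dSplit[2].m_iThreadCap = 1;	// fixed at 1 thread

	IntVec_t dThreads;
	DistributeThreadsOverIndexes ( dThreads, dSplit, 8 );
	// expected outcome: {5, 2, 1} - index 2 keeps its single thread, index 1
	// hits its cap, and the uncapped index 0 absorbs the remaining budget
}
#endif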
int CalcMaxThreadsPerIndex ( int iConcurrency, int iNumIndexes )
{
return iNumIndexes<iConcurrency ? ( iConcurrency-iNumIndexes ) + 1 : 1;
}
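// Editor's note (illustration): e.g. iConcurrency=16 over 4 indexes caps each
// index at (16-4)+1 = 13 threads; once iNumIndexes>=iConcurrency every index
// gets exactly 1 thread.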
| 2,058 | C++ | .cpp | 52 | 37.326923 | 119 | 0.722668 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,856 | sortertraits.cpp | manticoresoftware_manticoresearch/src/sortertraits.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sortertraits.h"
#include "schematransform.h"
void MatchSorter_c::SetSchema ( ISphSchema * pSchema, bool bRemapCmp )
{
assert ( pSchema );
m_tState.FixupLocators ( m_pSchema, pSchema, bRemapCmp );
m_pSchema = pSchema;
}
void MatchSorter_c::SetState ( const CSphMatchComparatorState & tState )
{
m_tState = tState;
m_tState.m_iNow = (DWORD) time ( nullptr );
}
void MatchSorter_c::CloneTo ( ISphMatchSorter * pTrg ) const
{
assert ( pTrg );
pTrg->SetRandom(m_bRandomize);
pTrg->SetState(m_tState);
pTrg->SetSchema ( m_pSchema->CloneMe(), false );
}
bool MatchSorter_c::CanBeCloned() const
{
if ( !m_pSchema )
return true;
bool bGotStatefulUDF = false;
for ( int i = 0; i < m_pSchema->GetAttrsCount() && !bGotStatefulUDF; i++ )
{
auto & pExpr = m_pSchema->GetAttr(i).m_pExpr;
if ( pExpr )
pExpr->Command ( SPH_EXPR_GET_STATEFUL_UDF, &bGotStatefulUDF );
}
return !bGotStatefulUDF;
}
void MatchSorter_c::SetFilteredAttrs ( const sph::StringSet & hAttrs, bool bAddDocid )
{
assert ( m_pSchema );
m_dTransformed.Reserve ( hAttrs.GetLength() );
if ( bAddDocid && !hAttrs[sphGetDocidName()] )
m_dTransformed.Add ( sphGetDocidName() );
for ( auto & tName : hAttrs )
{
const CSphColumnInfo * pCol = m_pSchema->GetAttr ( tName.first.cstr() );
if ( pCol )
m_dTransformed.Add ( pCol->m_sName );
}
}
void MatchSorter_c::TransformPooled2StandalonePtrs ( GetBlobPoolFromMatch_fn fnBlobPoolFromMatch, GetColumnarFromMatch_fn fnGetColumnarFromMatch, bool bFinalizeSorters )
{
auto * pOldSchema = GetSchema();
assert ( pOldSchema );
// create new standalone schema (from old, or from filtered)
auto * pNewSchema = new CSphSchema ( "standalone" );
for ( int i = 0; i<pOldSchema->GetFieldsCount (); ++i )
pNewSchema->AddField ( pOldSchema->GetField(i) );
TransformedSchemaBuilder_c tBuilder ( *pOldSchema, *pNewSchema );
if ( m_dTransformed.IsEmpty() )
{
// keep id as the first attribute
const CSphColumnInfo* pId = pOldSchema->GetAttr ( sphGetDocidName() );
if ( pId )
tBuilder.AddAttr ( sphGetDocidName() );
// add the rest
for ( int i = 0; i<pOldSchema->GetAttrsCount (); i++ )
{
const CSphColumnInfo & tAttr = pOldSchema->GetAttr(i);
if ( tAttr.m_sName!=sphGetDocidName() && tAttr.m_sName!=GetNullMaskAttrName() )
tBuilder.AddAttr ( tAttr.m_sName );
}
}
else
{
// keep id as the first attribute, then the rest.
m_dTransformed.any_of ( [&tBuilder] ( const auto& sName ) { auto bID = ( sName==sphGetDocidName() ); if ( bID ) tBuilder.AddAttr(sName); return bID; } );
m_dTransformed.for_each ( [&tBuilder] ( const auto& sName ) { if ( sName!=sphGetDocidName() && sName!=GetNullMaskAttrName() ) tBuilder.AddAttr(sName); } );
}
for ( int i = 0; i <pNewSchema->GetAttrsCount(); ++i )
{
auto & pExpr = pNewSchema->GetAttr(i).m_pExpr;
if ( pExpr )
pExpr->FixupLocator ( pOldSchema, pNewSchema );
}
tBuilder.Finalize();
auto pMatchesToNewSchema = std::unique_ptr<MatchProcessor_i>( CreateMatchSchemaTransform ( pOldSchema, pNewSchema, std::move ( fnBlobPoolFromMatch ), std::move ( fnGetColumnarFromMatch ) ) );
Finalize ( *pMatchesToNewSchema, false, bFinalizeSorters );
SetSchema ( pNewSchema, true );
}
///////////////////////////////////////////////////////////////////////////////
CSphMatchQueueTraits::CSphMatchQueueTraits ( int iSize )
: m_iSize ( iSize )
, m_dData { iSize }
{
assert ( iSize>0 );
m_iMatchCapacity = iSize;
m_dIData.Resize ( iSize );
m_tState.m_iNow = (DWORD) time ( nullptr );
ARRAY_FOREACH ( i, m_dIData )
m_dIData[i] = i;
m_dIData.Resize ( 0 );
}
/// the dtor calls FreeDataPtrs() here; ResetDynamic() then also gets called by the m_dData dtor.
CSphMatchQueueTraits::~CSphMatchQueueTraits ()
{
if ( m_pSchema )
m_dData.Apply ( [this] ( CSphMatch& tMatch ) { m_pSchema->FreeDataPtrs ( tMatch ); } );
}
void CSphMatchQueueTraits::SwapMatchQueueTraits ( CSphMatchQueueTraits& rhs )
{
// ISphMatchSorter
::Swap ( m_iTotal, rhs.m_iTotal );
// CSphMatchQueueTraits
m_dData.SwapData ( rhs.m_dData );
m_dIData.SwapData ( rhs.m_dIData );
assert ( m_iSize==rhs.m_iSize );
}
CSphMatch & CSphMatchQueueTraits::Add()
{
// proper ids in m_dIData were already set in the constructor
// they stay the same over the whole lifespan - that is why Add() works as an anti-Pop
int iLast = m_dIData.Add();
return m_dData[iLast];
}
int CSphMatchQueueTraits::ResetDynamic ( int iMaxUsed )
{
for ( int i=0; i<iMaxUsed; i++ )
m_dData[i].ResetDynamic();
return -1;
}
int CSphMatchQueueTraits::ResetDynamicFreeData ( int iMaxUsed )
{
for ( int i=0; i<iMaxUsed; i++ )
{
m_pSchema->FreeDataPtrs ( m_dData[i] );
m_dData[i].ResetDynamic();
}
return -1;
}
///////////////////////////////////////////////////////////////////////////////
void MatchCloner_c::SetSchema ( const ISphSchema * pSchema )
{
m_pSchema = (const CSphSchemaHelper *) pSchema; /// lazy hack
m_dRowBuf.Reset ( m_pSchema->GetDynamicSize() );
}
// clone plain part (incl. pointers) from src to dst
// keep group part (aggregates, group_concat) of dst intact
// it assumes that tDst.m_pDynamic either contains correct data or has been wiped.
void MatchCloner_c::CloneKeepingAggrs ( CSphMatch & tDst, const CSphMatch & tSrc )
{
assert ( m_pSchema );
assert ( m_bPtrRowsCommited );
// memorize old dynamic first
memcpy ( m_dRowBuf.Begin(), tDst.m_pDynamic, m_dRowBuf.GetLengthBytes() );
m_pSchema->CloneMatchSpecial ( tDst, tSrc, m_dOtherPtrRows );
/*
FreeDataSpecial ( tDst, m_dOtherPtrRows );
pDst->Combine ( *pSrc, GetDynamicSize () );
CopyPtrsSpecial ( tDst, tSrc, m_dOtherPtrRows );
*/
// restore back group-by attributes
for ( auto & tAttrGrp : m_dAttrsGrp )
tDst.SetAttr ( tAttrGrp, sphGetRowAttr ( m_dRowBuf.Begin(), tAttrGrp ) );
// restore back group_concat attribute(s)
for ( auto & tAttrPtr : m_dAttrsPtr )
tDst.SetAttr ( tAttrPtr, sphGetRowAttr ( m_dRowBuf.Begin (), tAttrPtr ) );
}
// clone plain part (incl. pointers) from src to dst
// group part (aggregates, group_concat) is not copied
void MatchCloner_c::CloneWithoutAggrs ( CSphMatch & tDst, const CSphMatch & tSrc )
{
assert ( m_pSchema );
assert ( m_bPtrRowsCommited );
m_pSchema->CloneMatchSpecial ( tDst, tSrc, m_dOtherPtrRows );
/*
FreeDataSpecial ( tDst, m_dOtherPtrRows );
pDst->Combine ( *pSrc, GetDynamicSize () );
CopyPtrsSpecial ( tDst, tSrc, m_dOtherPtrRows );
*/
}
// just write group part (aggregates, group_concat) without cloning
// assumes tDst has m_pDynamic allocated. FIXME! look at #881 again...
void MatchCloner_c::CopyAggrs ( CSphMatch & tDst, const CSphMatch & tSrc )
{
assert ( m_pSchema );
assert ( m_bPtrRowsCommited );
assert ( &tDst!=&tSrc );
assert ( tDst.m_pDynamic );
for ( auto & dAttrGrp : m_dAttrsGrp )
tDst.SetAttr ( dAttrGrp, tSrc.GetAttr ( dAttrGrp ));
CSphSchemaHelper::FreeDataSpecial ( tDst, m_dMyPtrRows );
CSphSchemaHelper::CopyPtrsSpecial ( tDst, tSrc, m_dMyPtrRows );
}
// copy group part (aggregates)
// move group_concat part without reallocating
void MatchCloner_c::MoveAggrs ( CSphMatch & tDst, CSphMatch & tSrc )
{
assert ( m_pSchema );
assert ( m_bPtrRowsCommited );
assert ( &tDst!=&tSrc );
assert ( tDst.m_pDynamic );
for ( auto & dAttrGrp : m_dAttrsGrp )
tDst.SetAttr ( dAttrGrp, tSrc.GetAttr ( dAttrGrp ));
CSphSchemaHelper::MovePtrsSpecial( tDst, tSrc, m_dMyPtrRows );
}
void MatchCloner_c::ResetAttrs()
{
m_dAttrsGrp.Resize ( 0 );
m_dAttrsPtr.Resize ( 0 );
}
void MatchCloner_c::CommitPtrs()
{
assert ( m_pSchema );
static const int SIZE_OF_ROW = 8 * sizeof ( CSphRowitem );
if ( m_bPtrRowsCommited )
m_dMyPtrRows.Resize(0);
for ( const CSphAttrLocator &tLoc : m_dAttrsPtr )
m_dMyPtrRows.Add ( tLoc.m_iBitOffset / SIZE_OF_ROW );
m_dOtherPtrRows = m_pSchema->SubsetPtrs ( m_dMyPtrRows );
#ifndef NDEBUG
// sanity check
m_dMyPtrRows = m_pSchema->SubsetPtrs ( m_dOtherPtrRows );
assert ( m_dMyPtrRows.GetLength ()==m_dAttrsPtr.GetLength () );
#endif
m_bPtrRowsCommited = true;
}
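// Editor's note (illustration): SIZE_OF_ROW above is the width of one
// CSphRowitem in bits, so m_iBitOffset / SIZE_OF_ROW maps a locator's bit
// offset to its row index; with 4-byte rowitems (32 bits) a locator at bit
// offset 96 lives in row 3.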
///////////////////////////////////////////////////////////////////////////////
void BaseGroupSorter_c::SetColumnar ( columnar::Columnar_i * pColumnar )
{
for ( auto i : m_dAggregates )
i->SetColumnar(pColumnar);
}
void BaseGroupSorter_c::SetupBaseGrouper ( ISphSchema * pSchema, int iDistinct, CSphVector<AggrFunc_i *> * pAvgs )
{
m_tPregroup.ResetAttrs();
ResetAggregates();
m_tPregroup.SetSchema ( pSchema );
m_tPregroup.AddRaw ( m_tLocGroupby ); // @groupby
m_tPregroup.AddRaw ( m_tLocCount ); // @count
if ( iDistinct )
m_tPregroup.AddRaw ( m_tLocDistinct ); // @distinct
// extract aggregates
for ( int i = 0; i<pSchema->GetAttrsCount (); ++i )
{
const CSphColumnInfo &tAttr = pSchema->GetAttr ( i );
if ( tAttr.m_eAggrFunc==SPH_AGGR_NONE
|| IsGroupbyMagic ( tAttr.m_sName ) // @count, @groupby, @groupbystr, @distinct, count(*), groupby()
|| IsSortStringInternal ( tAttr.m_sName.cstr () ) )
continue;
switch ( tAttr.m_eAggrFunc )
{
case SPH_AGGR_SUM: m_dAggregates.Add ( CreateAggrSum(tAttr) ); break;
case SPH_AGGR_AVG:
m_dAggregates.Add ( CreateAggrAvg ( tAttr, m_tLocCount ) );
// store avgs so these attributes can be calculated prior to the group sort
if ( pAvgs )
pAvgs->Add ( m_dAggregates.Last() );
break;
case SPH_AGGR_MIN: m_dAggregates.Add ( CreateAggrMin(tAttr) ); break;
case SPH_AGGR_MAX: m_dAggregates.Add ( CreateAggrMax(tAttr) ); break;
case SPH_AGGR_CAT:
m_dAggregates.Add ( CreateAggrConcat(tAttr) );
m_tPregroup.AddPtr ( tAttr.m_tLocator );
break;
default: assert ( 0 && "internal error: unhandled aggregate function" );
break;
}
if ( tAttr.m_eAggrFunc!=SPH_AGGR_CAT )
m_tPregroup.AddRaw ( tAttr.m_tLocator );
}
m_tPregroup.CommitPtrs();
}
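// Editor's note (hypothetical query, for illustration): for something like
// SELECT SUM(price), AVG(price), GROUP_CONCAT(tag) ... GROUP BY cat
// the loop above creates one AggrFunc_i per aggregate; SUM/AVG/MIN/MAX columns
// are registered as raw pregroup attributes, while the SPH_AGGR_CAT column is
// registered via AddPtr() instead, since group_concat values are heap blobs
// that must be cloned and freed explicitly.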
bool BaseGroupSorter_c::EvalHAVING ( const CSphMatch& tMatch )
{
return !m_pAggrFilterTrait || m_pAggrFilterTrait->Eval ( tMatch );
}
void BaseGroupSorter_c::AggrUpdate ( CSphMatch & tDst, const CSphMatch & tSrc, bool bGrouped, bool bMerge )
{
for ( auto * pAggregate : this->m_dAggregates )
pAggregate->Update ( tDst, tSrc, bGrouped, bMerge );
}
void BaseGroupSorter_c::AggrSetup ( CSphMatch & tDst, const CSphMatch & tSrc, bool bMerge )
{
for ( auto * pAggregate : this->m_dAggregates )
pAggregate->Setup ( tDst, tSrc, bMerge );
}
void BaseGroupSorter_c::AggrUngroup ( CSphMatch & tMatch )
{
for ( auto * pAggregate : this->m_dAggregates )
pAggregate->Ungroup ( tMatch );
}
void BaseGroupSorter_c::ResetAggregates()
{
for ( auto & pAggregate : m_dAggregates )
SafeDelete ( pAggregate );
m_dAggregates.Resize(0);
}
| 11,027 | C++ | .cpp | 305 | 33.911475 | 192 | 0.701655 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,857 | conversion.cpp | manticoresoftware_manticoresearch/src/conversion.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "conversion.h"
#include <cmath>
#include <cfloat>
DWORD sphToDword ( const char * s )
{
if ( !s ) return 0;
return strtoul ( s, NULL, 10 );
}
// move to source mysql
float sphToFloat ( const char * s )
{
if ( !s ) return 0.0f;
double fRes = strtod ( s, NULL );
if ( fRes==-HUGE_VAL || fRes<=-FLT_MAX )
return -FLT_MAX;
if ( fRes==HUGE_VAL || fRes>=FLT_MAX )
return FLT_MAX;
return (float)fRes;
}
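// Editor's note (illustration): out-of-range values are clamped instead of
// returning +/-inf, e.g. sphToFloat("1e100") yields FLT_MAX,
// sphToFloat("-1e100") yields -FLT_MAX, and sphToFloat(nullptr) yields 0.0f.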
int64_t sphToInt64 ( const char * szNumber, CSphString * pError )
{
if ( !szNumber )
return 0;
char * szEndPtr = nullptr;
errno = 0;
int64_t iNumber = strtoll ( szNumber, &szEndPtr, 10 );
if ( pError )
{
if ( szNumber==szEndPtr )
pError->SetSprintf ( "invalid number \"%s\", " INT64_FMT " assumed", szNumber, iNumber );
else if ( errno==ERANGE && iNumber==LLONG_MIN )
pError->SetSprintf ( "underflow detected \"%s\", " INT64_FMT " assumed", szNumber, iNumber );
else if ( errno==ERANGE && iNumber==LLONG_MAX )
pError->SetSprintf ( "overflow detected \"%s\", " INT64_FMT " assumed", szNumber, iNumber );
}
return iNumber;
}
uint64_t sphToUInt64 ( const char * szNumber, CSphString * pError )
{
if ( !szNumber )
return 0;
char * szEndPtr = nullptr;
errno = 0;
uint64_t uNumber = strtoull ( szNumber, &szEndPtr, 10 );
if ( pError )
{
if ( szNumber==szEndPtr )
pError->SetSprintf ( "invalid number \"%s\", " UINT64_FMT " assumed", szNumber, uNumber );
else if ( errno==ERANGE )
pError->SetSprintf ( "overflow detected \"%s\", " UINT64_FMT " assumed", szNumber, uNumber );
}
return uNumber;
}
uint64_t StrToDocID ( const char * szNumber, CSphString & sError )
{
if ( szNumber && *szNumber=='-' )
{
sError = "Negative document ids are not allowed";
return 0;
}
uint64_t uDocID = sphToUInt64 ( szNumber, &sError );
if ( !sError.IsEmpty() )
return 0;
return uDocID;
}
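// Editor's sketch (illustration only, not in the original file):
#if 0
CSphString sError;
uint64_t uOk = StrToDocID ( "42", sError );		// 42, sError stays empty
uint64_t uBad = StrToDocID ( "-5", sError );	// 0, sets the negative-id error
#endif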
| 2,328 | C++ | .cpp | 76 | 28.460526 | 96 | 0.682205 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,858 | compressed_zlib_mysql.cpp | manticoresoftware_manticoresearch/src/compressed_zlib_mysql.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "compressed_zlib_mysql.h"
#include "compressed_mysql_layer.h"
#include <zlib.h>
static const int LEVEL_COMPRESSION = Z_DEFAULT_COMPRESSION;
class ZlibCompressor
{
protected:
using csize_t = uLong;
inline static csize_t Common_compressBound ( csize_t uSize )
{
return (csize_t)compressBound (uSize);
}
inline static int Common_compress ( BYTE* pDest, csize_t* pDestLen, const BYTE* pSource, csize_t uSourceLen )
{
return compress2 ( pDest, pDestLen, pSource, uSourceLen, LEVEL_COMPRESSION );
}
inline static bool Common_uncompress ( BYTE* pDest, csize_t* pDestLen, const BYTE* pSource, csize_t uSourceLen )
{
auto iZResult = uncompress ( pDest, (csize_t*) pDestLen, pSource, uSourceLen );
return iZResult == Z_OK;
}
};
bool IsZlibCompressionAvailable()
{
return true;
}
void MakeZlibMysqlCompressedLayer ( std::unique_ptr<AsyncNetBuffer_c> & pSource )
{
pSource = std::make_unique<MysqlCompressedSocket_T<ZlibCompressor>> ( std::move ( pSource ) );
}
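// Editor's sketch: MysqlCompressedSocket_T is policy-based, so any class
// exposing the same three statics can serve as a backend. A hypothetical
// pass-through policy (illustration only, not project code) would look like:
#if 0
#include <cstring>
class IdentityCompressor
{
protected:
	using csize_t = uLong;
	inline static csize_t Common_compressBound ( csize_t uSize ) { return uSize; }
	inline static int Common_compress ( BYTE* pDest, csize_t* pDestLen, const BYTE* pSource, csize_t uSourceLen )
	{
		memcpy ( pDest, pSource, uSourceLen );
		*pDestLen = uSourceLen;
		return 0; // 0 doubles as Z_OK here
	}
	inline static bool Common_uncompress ( BYTE* pDest, csize_t* pDestLen, const BYTE* pSource, csize_t uSourceLen )
	{
		memcpy ( pDest, pSource, uSourceLen );
		*pDestLen = uSourceLen;
		return true;
	}
};
// pSource = std::make_unique<MysqlCompressedSocket_T<IdentityCompressor>> ( std::move ( pSource ) );
#endif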
| 1,366 | C++ | .cpp | 39 | 33.307692 | 113 | 0.755876 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,859 | mini_timer.cpp | manticoresoftware_manticoresearch/src/mini_timer.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "mini_timer.h"
#include "threadutils.h"
#include "timeout_queue.h"
#ifndef VERBOSE_TIMER
#define VERBOSE_TIMER 0
#endif
#if VERBOSE_TIMER
#define LOG_LEVEL_TIMER true
#else
#define LOG_LEVEL_TIMER false
#endif
namespace Time
{
CSphString Stamp()
{
return StringBuilder_c().Sprintf( "[%t] ", TimePrefixed::TimeStamp() ).cstr();
}
}
#define LOG_COMPONENT_TSKX "X " << Time::Stamp()
#define LOG_COMPONENT_TSKT "T " << Time::Stamp()
#define INFOX LOGMSG ( INFO, TIMER, TSKX )
#define DEBUGT LOGMSG ( DEBUG, TIMER, TSKT )
#define DEBUGX LOGMSG ( DEBUG, TIMER, TSKX )
static std::atomic<int64_t> g_tmLastTimestamp { sphMicroTimer() };
inline static int64_t MicroTimerImpl()
{
int64_t tmTimestamp = sphMicroTimer();
g_tmLastTimestamp.store ( tmTimestamp, std::memory_order_relaxed );
return tmTimestamp;
}
inline static int64_t LastTimestampImpl()
{
return g_tmLastTimestamp.load ( std::memory_order_relaxed );
}
int64_t sph::MicroTimer()
{
return MicroTimerImpl();
}
int64_t sph::LastTimestamp()
{
return LastTimestampImpl();
}
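// Editor's note (illustration): sph::MicroTimer() performs the real clock read
// and refreshes the shared cache, while sph::LastTimestamp() is just a relaxed
// atomic load, so hot paths can poll time cheaply:
#if 0
int64_t tmNow = sph::MicroTimer();		// clock read + cache update
int64_t tmSeen = sph::LastTimestamp();	// cached value, tmSeen<=tmNow
#endif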
/// timer thread context
static ThreadRole TimerThread;
static bool g_bTimerCreated = false;
static bool g_bTimerActive = false;
class TinyTimer_c
{
// the queue
mutable CSphMutex m_tTimeoutsGuard; // the guard is needed since elements can be added/removed from any thread; locks are short-lived.
TimeoutQueue_c m_dTimeouts GUARDED_BY ( m_tTimeoutsGuard );
// management
OneshotEvent_c m_tSignal;
std::atomic<bool> m_bInterrupted { true };
// thread
SphThread_t m_tCounterThread;
std::atomic<Threads::LowThreadDesc_t*> m_pCounterThread { nullptr };
private:
inline bool IsInterrupted() const
{
return m_bInterrupted.load(std::memory_order_relaxed) || sphInterrupted(); // aliased, as we can override it in tests while mocking
}
void Enqueue ( MiniTimer_c& tTask ) EXCLUDES ( TimerThread )
{
DEBUGT << "enqueue " << &tTask;
{
ScopedMutex_t tTimeoutsLock { m_tTimeoutsGuard };
m_dTimeouts.Change ( &tTask );
}
Kick();
}
int GetNextWaitPeriodMs() REQUIRES ( TimerThread ) EXCLUDES ( m_tTimeoutsGuard )
{
ScopedMutex_t tTimeoutsLock { m_tTimeoutsGuard };
if ( m_dTimeouts.IsEmpty() )
return -1;
auto* pTask = (MiniTimer_c*)m_dTimeouts.Root();
return (int)( ( pTask->m_iTimeoutTimeUS - MicroTimerImpl() ) / sph::TICKS_GRANULARITY );
}
MiniTimer_c* PopNextDeadlinedAction() EXCLUDES ( m_tTimeoutsGuard )
{
ScopedMutex_t tTimeoutsLock { m_tTimeoutsGuard };
if ( m_dTimeouts.IsEmpty() )
return nullptr;
auto pRoot = (MiniTimer_c*)m_dTimeouts.Root();
assert ( pRoot->m_iTimeoutTimeUS > 0 );
if ( !sph::TimeExceeded ( pRoot->m_iTimeoutTimeUS, MicroTimerImpl() ) )
return nullptr;
// timeout reached; have to do an action
DEBUGT << "timeout happens for " << pRoot << " deadline " << timestamp_t ( pRoot->m_iTimeoutTimeUS );
DEBUGT << m_dTimeouts.DebugDump ( "heap:" );
m_dTimeouts.Pop();
DEBUGT << "Oneshot task removed: " << pRoot;
return pRoot;
}
void ProcessTimerActions() REQUIRES ( TimerThread ) EXCLUDES ( m_tTimeoutsGuard )
{
for ( MiniTimer_c* pRoot = PopNextDeadlinedAction(); pRoot; pRoot = PopNextDeadlinedAction() )
if ( pRoot->m_fnOnTimer )
pRoot->m_fnOnTimer();
}
void Loop()
{
ScopedRole_c thSched ( TimerThread );
m_pCounterThread.store ( &Threads::MyThd(), std::memory_order_relaxed );
g_bTimerActive = true;
while ( !IsInterrupted () )
{
DEBUGT << "---------------------------- Loop() tick";
ProcessTimerActions();
int iWait = GetNextWaitPeriodMs();
if ( !iWait )
{
DEBUGT << "no sleep since timeout is 0; (" << timestamp_t ( iWait ) << ")";
continue;
}
DEBUGT << "calculated timeout is " << iWait << " ms (" << timestamp_t ( iWait ) << ")";
bool VARIABLE_IS_NOT_USED bWasKicked = m_tSignal.WaitEvent ( iWait );
DEBUGT << "awakened, reason=" << ( bWasKicked ? "kicked" : "timeout or error" );
}
g_bTimerActive = false;
AbortScheduled();
m_pCounterThread.store ( nullptr, std::memory_order_relaxed );
}
MiniTimer_c* PopNextAction() REQUIRES ( TimerThread ) EXCLUDES ( m_tTimeoutsGuard )
{
ScopedMutex_t tTimeoutsLock { m_tTimeoutsGuard };
if ( m_dTimeouts.IsEmpty() )
return nullptr;
auto pRoot = (MiniTimer_c*)m_dTimeouts.Root();
m_dTimeouts.Pop();
return pRoot;
}
/// abandon and release all events (on shutdown)
void AbortScheduled() REQUIRES ( TimerThread ) EXCLUDES ( m_tTimeoutsGuard )
{
DEBUGT << "AbortScheduled()";
assert ( IsInterrupted() );
for ( MiniTimer_c* pRoot = PopNextAction(); pRoot; pRoot = PopNextAction() )
if ( pRoot->m_fnOnTimer )
pRoot->m_fnOnTimer();
}
public:
TinyTimer_c()
{
MicroTimerImpl();
m_bInterrupted.store ( false, std::memory_order_release );
g_bTimerCreated = true;
Threads::RegisterIterator ( [this] ( Threads::ThreadFN& fnHandler ) {
fnHandler ( m_pCounterThread.load ( std::memory_order_relaxed ) );
} );
Threads::Create ( &m_tCounterThread, [this] { Loop (); }, false, "Timer" );
}
~TinyTimer_c()
{
DEBUGX << "~TinyTimer_c. Shutdown=" << IsInterrupted();
Stop();
}
void Stop()
{
m_bInterrupted.store ( true, std::memory_order_release );
if ( !g_bTimerActive )
return;
Kick();
Threads::Join ( &m_tCounterThread );
}
/// Kick the tasker
void Kick()
{
DEBUGX << "Timer kicked";
m_tSignal.SetEvent();
}
void EngageAt ( int64_t iTimeStampUS, MiniTimer_c& tTimer ) EXCLUDES ( TimerThread )
{
tTimer.m_iTimeoutTimeUS = iTimeStampUS;
DEBUGT << "Engage task: " << &tTimer << " after " << timestamp_t ( iTimeStampUS );
Enqueue ( tTimer );
}
int64_t Engage ( int64_t iTimePeriodUS, MiniTimer_c& tTimer ) EXCLUDES ( TimerThread )
{
if ( iTimePeriodUS < 0 || IsInterrupted() )
return -1;
EngageAt ( MicroTimerImpl() + iTimePeriodUS, tTimer );
return tTimer.m_iTimeoutTimeUS;
}
void Remove ( MiniTimer_c& tTimer ) EXCLUDES ( m_tTimeoutsGuard, TimerThread )
{
ScopedMutex_t tTimeoutsLock { m_tTimeoutsGuard };
DEBUGT << ((tTimer.m_iTimeoutIdx >= 0) ? "Removed from queue: " : "Not in queue: ") << &tTimer << " deadline " << timestamp_t ( tTimer.m_iTimeoutTimeUS );
m_dTimeouts.Remove ( &tTimer );
}
// statistics
void FillSchedInfo( CSphVector<sph::ScheduleInfo_t>& dRes) const EXCLUDES ( m_tTimeoutsGuard, TimerThread )
{
ScopedMutex_t tTimeoutsLock { m_tTimeoutsGuard };
m_dTimeouts.DebugDump ( [&dRes] ( EnqueuedTimeout_t* pMember ) {
auto& dInfo = dRes.Add();
auto* pScheduled = (MiniTimer_c*)pMember;
dInfo.m_iTimeoutStamp = pScheduled->m_iTimeoutTimeUS;
dInfo.m_sTask = pScheduled->m_szName;
} );
}
};
TinyTimer_c& g_TinyTimer()
{
static TinyTimer_c tTimer;
return tTimer;
}
void MiniTimer_c::EngageAt ( int64_t iTimeStampUS )
{
DEBUGT << "MiniTimer_c::EngageAt " << timestamp_t ( iTimeStampUS );
g_TinyTimer().EngageAt ( iTimeStampUS, *this );
}
void MiniTimer_c::EngageAt ( int64_t iTimeStampUS, Threads::Handler&& fnOnTimer )
{
DEBUGT << "MiniTimer_c::EngageAt " << timestamp_t ( iTimeStampUS );
assert ( !m_fnOnTimer );
m_fnOnTimer = std::move ( fnOnTimer );
g_TinyTimer().EngageAt ( iTimeStampUS, *this );
}
int64_t MiniTimer_c::Engage ( int64_t iTimePeriodMS )
{
auto iTimePeriodUS = iTimePeriodMS * 1000;
if ( iTimePeriodUS <= 0 )
return 0;
DEBUGT << "MiniTimer_c::Engage " << timespan_t ( iTimePeriodUS );
return g_TinyTimer().Engage ( iTimePeriodUS, *this );
}
int64_t MiniTimer_c::Engage ( int64_t iTimePeriodMS, Threads::Handler&& fnOnTimer )
{
auto iTimePeriodUS = iTimePeriodMS * 1000;
if ( iTimePeriodUS <= 0 )
return 0;
DEBUGT << "MiniTimer_c::Engage " << timespan_t ( iTimePeriodUS );
assert ( !m_fnOnTimer );
m_fnOnTimer = std::move ( fnOnTimer );
return g_TinyTimer().Engage ( iTimePeriodUS, *this );
}
void MiniTimer_c::UnEngage()
{
if ( g_bTimerCreated )
g_TinyTimer().Remove ( *this );
}
MiniTimer_c::~MiniTimer_c()
{
UnEngage();
}
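// Editor's sketch (illustration only, not in the original file): typical
// one-shot usage. Engage() takes a period in milliseconds and returns the
// absolute deadline in microseconds (0 when nothing was scheduled).
#if 0
MiniTimer_c tTimer;
tTimer.Engage ( 100, [] { /* fires on the timer thread after ~100 ms */ } );
// the dtor (or an explicit UnEngage()) removes the timer from the queue
#endif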
/// returns true if the provided timestamp has already been reached
bool sph::TimeExceeded ( int64_t tmMicroTimestamp )
{
if ( tmMicroTimestamp <= 0 )
return false;
return sph::TimeExceeded ( tmMicroTimestamp, LastTimestampImpl() );
}
void sph::ShutdownMiniTimer()
{
if ( g_bTimerActive )
g_TinyTimer().Stop();
}
// statistics
CSphVector<sph::ScheduleInfo_t> sph::GetSchedInfo()
{
CSphVector<sph::ScheduleInfo_t> dRes;
if ( g_bTimerCreated )
g_TinyTimer().FillSchedInfo ( dRes );
return dRes;
}
| 8,748 | C++ | .cpp | 276 | 29.358696 | 156 | 0.705722 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,860 | searchnode.cpp | manticoresoftware_manticoresearch/src/searchnode.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchnode.h"
#include "sphinxquery.h"
#include "sphinxint.h"
#include "sphinxplugin.h"
#include "sphinxqcache.h"
#include "attribute.h"
#include "mini_timer.h"
#include "coroutine.h"
#include "secondaryindex.h"
#include <math.h>
//////////////////////////////////////////////////////////////////////////
#if QDEBUG
#define QDEBUGARG(_arg) _arg
#else
#define QDEBUGARG(_arg)
#endif
//////////////////////////////////////////////////////////////////////////
/// costs for max_predicted_time estimations, in nanoseconds
/// YMMV, defaults were estimated in a very specific environment, and then rounded off
int g_iPredictorCostDoc = 64;
int g_iPredictorCostHit = 48;
int g_iPredictorCostSkip = 2048;
int g_iPredictorCostMatch = 64;
//////////////////////////////////////////////////////////////////////////
// EXTENDED MATCHING V2
//////////////////////////////////////////////////////////////////////////
#define SPH_BM25_K1 1.2f
static const float COST_SCALE = 1.0f/1000000.0f;
static volatile bool g_bInterruptNow = false;
static void PrintDocsChunk ( int QDEBUGARG(iCount), int QDEBUGARG(iAtomPos), const ExtDoc_t * QDEBUGARG(pDocs), const char * QDEBUGARG(sNode), void * QDEBUGARG(pNode), const char * sTerm=nullptr )
{
#if QDEBUG
StringBuilder_c tRes;
tRes.Appendf ( "node %s 0x%x:%p getdocs (%d)(%s) = ", sNode ? sNode : "???", iAtomPos, pNode, iCount, ( sTerm ? sTerm : "" ) );
tRes.StartBlock (", ","[","]\n");
for ( int i=0; i<iCount; ++i )
tRes.Appendf ( "0x%x", DWORD ( pDocs[i].m_tRowID ) );
tRes.FinishBlock ( false );
printf ( "%s", tRes.cstr() );
#endif
}
static void PrintHitsChunk ( int QDEBUGARG(iCount), int QDEBUGARG(iAtomPos), const ExtHit_t * QDEBUGARG(pHits), void * QDEBUGARG(pNode) )
{
#if QDEBUG
StringBuilder_c tRes;
tRes.Appendf ( "node 0x%x:%p gethits (%d) = ", iAtomPos, pNode, iCount );
tRes.StartBlock ( ", ", "[", "]\n" );
for ( int i=0; i<iCount; ++i )
tRes.Appendf ( "0x%x:0x%x", DWORD ( pHits[i].m_tRowID ), DWORD ( pHits[i].m_uHitpos ) );
tRes.FinishBlock ( false );
printf ( "%s\n", tRes.cstr() );
#endif
}
static void DebugIndent ( int iLevel )
{
while ( iLevel-- )
printf ( " " );
}
static FORCE_INLINE bool HasDocs ( const ExtDoc_t * pDoc )
{
return pDoc && pDoc->m_tRowID!=INVALID_ROWID;
}
static FORCE_INLINE bool HasHits ( const ExtHit_t * pHit )
{
assert ( pHit );
return pHit->m_tRowID!=INVALID_ROWID;
}
static FORCE_INLINE bool WarmupDocs ( const ExtDoc_t * & pDoc, ExtNode_i * pNode )
{
assert(pNode);
if ( HasDocs(pDoc) )
return true;
pDoc = pNode->GetDocsChunk();
return HasDocs(pDoc);
}
static FORCE_INLINE bool WarmupDocs ( const ExtDoc_t * & pDoc, const ExtHit_t * & pHit, ExtNode_i * pNode )
{
assert(pNode);
if ( HasDocs(pDoc) )
return true;
pDoc = pNode->GetDocsChunk();
if ( !HasDocs(pDoc) )
return false;
pHit = pNode->GetHits(pDoc);
return true;
}
static FORCE_INLINE bool WarmupDocs ( const ExtDoc_t * & pDocL, const ExtDoc_t * pDocR, ExtNode_i * pLeft )
{
assert(pLeft);
if ( HasDocs(pDocL) )
return true;
if ( HasDocs(pDocR) )
pLeft->HintRowID ( pDocR->m_tRowID );
pDocL = pLeft->GetDocsChunk();
return HasDocs(pDocL);
}
//////////////////////////////////////////////////////////////////////////
class ExtNode_c : public ExtNode_i
{
public:
static const int MAX_HITS = 512;
ExtNode_c ( int64_t tmTimeout=0 );
const ExtHit_t * GetHits ( const ExtDoc_t * pDocs ) override;
void DebugDump ( int iLevel ) override;
void SetAtomPos ( int iPos ) override;
int GetAtomPos() const override;
void SetQPosReverse();
void SetMaxTimeout ( int64_t iTimer );
bool TimeExceeded() const override;
int64_t GetMaxTimeout() const override;
protected:
ExtDoc_t m_dDocs[MAX_BLOCK_DOCS];
CSphVector<ExtHit_t> m_dHits;
bool m_bQPosReverse {false};
int m_iAtomPos {0}; ///< we now need it on this level for tricks like expanded keywords within phrases
const int64_t& m_iCheckTimePoint { Threads::Coro::GetNextTimePointUS() };
int64_t m_iMaxTimer; ///< work until this timestamp
virtual void CollectHits ( const ExtDoc_t * pDocs ) = 0;
inline const ExtDoc_t * ReturnDocsChunk ( int iCount, const char * sNode, const char * sTerm=nullptr );
inline const ExtHit_t * ReturnHitsChunk ( int iCount, const char * sNode, bool bReverse );
inline const ExtHit_t * ReturnHits ( bool bReverse );
};
//////////////////////////////////////////////////////////////////////////
// outputs docids returned by rowid iterators
class ExtIterator_c : public ExtNode_c
{
public:
ExtIterator_c ( RowidIterator_i * pIterator ) : m_pIterator ( pIterator ) { assert ( pIterator ); }
int GetQwords ( ExtQwordsHash_t & hQwords ) override { return -1; }
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override {}
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override {}
uint64_t GetWordID () const override { return 0; }
const ExtDoc_t * GetDocsChunk() override;
const ExtHit_t * GetHits ( const ExtDoc_t * pDocs ) final { return nullptr; }
void Reset ( const ISphQwordSetup & tSetup ) override {}
void HintRowID ( RowID_t tRowID ) override { m_pIterator->HintRowID(tRowID); }
bool GotHitless() override { return false; }
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override { return { 0.0f, 0, 0 }; }
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override {} // no need for filtering as iterators should output already filtered rowids
protected:
RowidIterator_i * m_pIterator = nullptr; // not owned by the node
bool m_bWarmup = true;
RowIdBlock_t m_dIteratorRowIDs;
int m_iDocOffset = 0;
void CollectHits ( const ExtDoc_t * pDocs ) final { assert ( 0 && "ExtIterator_c doesn't collect hits" ); }
};
const ExtDoc_t * ExtIterator_c::GetDocsChunk()
{
if ( m_bWarmup )
{
if ( !m_pIterator->GetNextRowIdBlock(m_dIteratorRowIDs) )
return nullptr;
m_iDocOffset = 0;
m_bWarmup = false;
}
RowID_t * pStart = m_dIteratorRowIDs.Begin()+m_iDocOffset;
RowID_t * pEnd = pStart + Min ( MAX_BLOCK_DOCS-1, m_dIteratorRowIDs.GetLength()-m_iDocOffset );
ExtDoc_t * pDocStart = m_dDocs;
while ( pStart < pEnd )
*pDocStart++ = { *pStart++, 0, 0.0f };
if ( pEnd==m_dIteratorRowIDs.End() )
m_bWarmup = true;
else
m_iDocOffset = pEnd-m_dIteratorRowIDs.Begin();
return ReturnDocsChunk ( pDocStart-m_dDocs, "filter" );
}
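// Editor's note: each GetDocsChunk() call above emits at most MAX_BLOCK_DOCS-1
// rowids into m_dDocs (the last slot is presumably reserved for the sentinel
// that ReturnDocsChunk() appends), then either re-arms m_bWarmup to fetch the
// next iterator block or remembers m_iDocOffset for the following call.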
//////////////////////////////////////////////////////////////////////////
/// single keyword streamer
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
class ExtTerm_T : public ExtNode_c, ISphNoncopyable
{
public:
ExtTerm_T ( ISphQword * pQword, const FieldMask_t & dFields, const ISphQwordSetup & tSetup, bool bNotWeighted ) { Init ( pQword, dFields, tSetup, bNotWeighted ); }
ExtTerm_T ( ISphQword * pQword, const ISphQwordSetup & tSetup );
ExtTerm_T() { m_dQueriedFields.UnsetAll(); }
~ExtTerm_T () override { SafeDelete ( m_pQword ); }
void Init ( ISphQword * pQword, const FieldMask_t & dFields, const ISphQwordSetup & tSetup, bool bNotWeighted );
void Reset ( const ISphQwordSetup & tSetup ) override;
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pMatched ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
bool GotHitless () override { return false; }
int GetDocsCount() const override { return m_pQword->m_iDocs; }
int GetHitsCount() const override { return m_pQword->m_iHits; }
uint64_t GetWordID () const override;
void HintRowID ( RowID_t tRowID ) override;
void SetCollectHits() override { m_bCollectHits = true; }
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override { return { float(m_pQword->m_iDocs)*COST_SCALE*60.0f, m_pQword->m_iDocs, 1 }; }
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override;
void DebugDump ( int iLevel ) override;
protected:
struct StoredHit_t
{
SphOffset_t m_tHitlistOffset;
RowID_t m_tRowID;
};
ISphQword * m_pQword = nullptr;
FieldMask_t m_dQueriedFields; ///< accepted fields mask
bool m_bHasWideFields = false; ///< whether the fields mask for this term refers to fields 32+
float m_fIDF = 0.0f; ///< IDF for this term (might be 0.0f for non-1st occurrences in query)
CSphString * m_pWarning = nullptr;
bool m_bNotWeighted = true;
CSphQueryStats * m_pStats = nullptr;
int64_t * m_pNanoBudget = nullptr;
bool m_bCollectHits = false;
RowIdBoundaries_t m_tBoundaries;
CSphVector<StoredHit_t> m_dStoredHits;
};
/// single keyword streamer with artificial hitlist
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
class ExtTermHitless_T : public ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>
{
using BASE = ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>;
public:
ExtTermHitless_T ( ISphQword * pQword, const FieldMask_t & dFields, const ISphQwordSetup & tSetup, bool bNotWeighted );
void CollectHits ( const ExtDoc_t * pMatched ) final;
bool GotHitless () final { return true; }
};
//////////////////////////////////////////////////////////////////////////
/// position filter policy
template < TermPosFilter_e T >
class TermAcceptor_T
{
public:
TermAcceptor_T ( ISphQword *, const XQNode_t *, const ISphQwordSetup & ) {}
protected:
inline bool IsAcceptableHit ( const ExtHit_t * ) const { return true; }
inline void Reset() {}
};
template<>
class TermAcceptor_T<TERM_POS_FIELD_LIMIT> : public ISphNoncopyable
{
public:
TermAcceptor_T ( ISphQword *, const XQNode_t * pNode, const ISphQwordSetup & );
protected:
inline bool IsAcceptableHit ( const ExtHit_t * ) const;
inline void Reset() {}
private:
const int m_iMaxFieldPos;
};
template<>
class TermAcceptor_T<TERM_POS_ZONES> : public ISphNoncopyable
{
public:
TermAcceptor_T ( ISphQword *, const XQNode_t * pNode, const ISphQwordSetup & tSetup );
protected:
inline bool IsAcceptableHit ( const ExtHit_t * pHit ) const;
inline void Reset();
ISphZoneCheck * m_pZoneChecker; ///< zone-limited searches query ranker about zones
mutable CSphVector<int> m_dZones; ///< zone ids for this particular term
mutable RowID_t m_tLastZoneRowID {INVALID_ROWID};
mutable int m_iCheckFrom {0};
};
//////////////////////////////////////////////////////////////////////////
class BufferedNode_c
{
protected:
const ExtDoc_t * m_pRawDocs = nullptr; ///< chunk start as returned by raw GetDocsChunk() (need to store it for raw CollectHits() calls)
const ExtDoc_t * m_pRawDoc = nullptr; ///< current position in raw docs chunk
const ExtHit_t * m_pRawHit = nullptr; ///< current position in raw hits chunk
ExtDoc_t m_dMyDocs[MAX_BLOCK_DOCS]; ///< all documents within the required pos range
CSphVector<ExtHit_t> m_dMyHits; ///< all hits within the required pos range
BufferedNode_c();
void Reset();
void CopyMatchingHits ( CSphVector<ExtHit_t> & dHits, const ExtDoc_t * pDocs );
};
/// single keyword streamer, with term position filtering
template < TermPosFilter_e T, class NODE >
class ExtConditional_T : public ExtNode_c, public BufferedNode_c, protected TermAcceptor_T<T>
{
using Acceptor_c = TermAcceptor_T<T>;
public:
void Reset ( const ISphQwordSetup & tSetup ) final;
const ExtDoc_t * GetDocsChunk() final;
void CollectHits ( const ExtDoc_t * pDocs ) final;
void HintRowID ( RowID_t tRowID ) override;
bool GotHitless () final { return false; }
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
uint64_t GetWordID() const override;
void SetAtomPos ( int iPos ) override;
int GetAtomPos() const override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override { return m_tNode.Estimate(iTotalDocs); }
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override { m_tNode.SetRowidBoundaries(tBoundaries); }
protected:
NODE m_tNode;
ExtConditional_T ( ISphQword * pQword, const XQNode_t * pNode, const ISphQwordSetup & tSetup );
};
/// single keyword streamer, with term position filtering
template <TermPosFilter_e T, bool USE_BM25, bool ROWID_LIMITS, bool STATS>
class ExtTermPos_T : public ExtConditional_T<T,ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>>
{
public:
ExtTermPos_T( ISphQword * pQword, const XQNode_t * pNode, const ISphQwordSetup & tSetup )
: ExtConditional_T<T,ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>> ( pQword, pNode, tSetup )
{
this->m_tNode.Init ( pQword, pNode->m_dSpec.m_dFieldMask, tSetup, pNode->m_bNotWeighted );
}
};
/// multi-node binary-operation streamer traits
class ExtTwofer_c : public ExtNode_c
{
public:
ExtTwofer_c ( ExtNode_i * pFirst, ExtNode_i * pSecond );
ExtTwofer_c () {} ///< to be used together with Init()
void Init ( ExtNode_i * pLeft, ExtNode_i * pRight );
void Reset ( const ISphQwordSetup & tSetup ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
bool GotHitless () override;
void HintRowID ( RowID_t tRowID ) override;
uint64_t GetWordID() const override;
void SetCollectHits() override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override;
void SetNodePos ( WORD uPosLeft, WORD uPosRight );
protected:
std::unique_ptr<ExtNode_i> m_pLeft;
std::unique_ptr<ExtNode_i> m_pRight;
const ExtDoc_t * m_pDocL = nullptr;
const ExtDoc_t * m_pDocR = nullptr;
WORD m_uNodePosL = 0;
WORD m_uNodePosR = 0;
void DebugDumpT ( const char * sName, int iLevel );
};
/// A-and-B streamer
class ExtAnd_c : public ExtTwofer_c
{
public:
ExtAnd_c ( ExtNode_i * pLeft, ExtNode_i * pRight );
ExtAnd_c() {} ///< to be used with Init()
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
int GetDocsCount() const override { return m_bEmpty ? 0 : ExtTwofer_c::GetDocsCount(); }
void DebugDump ( int iLevel ) override;
private:
bool m_bEmpty = false;
};
// AND that returns hits from right node only
class ExtAndRightHits_c : public ExtAnd_c
{
public:
ExtAndRightHits_c ( ExtNode_i * pLeft, ExtNode_i * pRight ) : ExtAnd_c ( pLeft, pRight ) {}
int GetQwords ( ExtQwordsHash_t & hQwords ) override { assert(m_pRight); return m_pRight->GetQwords(hQwords); }
const ExtHit_t * GetHits ( const ExtDoc_t * pDocs ) override { assert(m_pRight); return m_pRight->GetHits(pDocs); }
void Reset ( const ISphQwordSetup & tSetup ) override;
void DebugDump ( int iLevel ) override { DebugDumpT ( "ExtAndRightHits", iLevel ); }
};
void ExtAndRightHits_c::Reset ( const ISphQwordSetup & tSetup )
{
assert(m_pRight);
m_pRight->Reset(tSetup);
m_pRight.release();
}
template <bool USE_BM25, bool TEST_FIELDS, bool ROWID_LIMITS>
class ExtMultiAnd_T : public ExtNode_c
{
public:
ExtMultiAnd_T ( const VecTraits_T<XQNode_t*> & dXQNodes, const ISphQwordSetup & tSetup );
~ExtMultiAnd_T() override;
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
void Reset ( const ISphQwordSetup & tSetup ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
uint64_t GetWordID () const override;
bool GotHitless () override { return false; }
int GetDocsCount() const override;
void HintRowID ( RowID_t tRowID ) override;
void SetCollectHits() override { m_bCollectHits = true; }
void DebugDump ( int iLevel ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override { m_tBoundaries = tBoundaries; }
private:
struct NodeInfo_t
{
ISphQword * m_pQword {nullptr};
RowID_t m_tRowID {INVALID_ROWID};
bool m_bHitsOver {false};
float m_fIDF {0.0f};
WORD m_uNodepos {0};
int m_iAtomPos {0};
FieldMask_t m_dQueriedFields;
bool m_bHasWideFields {false};
bool m_bNotWeighted {true};
void UpdateWideFieldFlag ( const ISphQwordSetup & tSetup );
bool FitsFields() const;
};
struct StoredMultiHit_t
{
CSphFixedVector<SphOffset_t> m_dHitlistOffsets;
RowID_t m_tRowID;
// don't change to a defaulted ctor, centos6 will fail to compile!
StoredMultiHit_t () : m_dHitlistOffsets ( 0 ) {}
};
struct HitInfo_t
{
ISphQword * m_pQword;
WORD m_uNodePos;
WORD m_uQueryPos;
Hitpos_t m_uHitpos;
};
struct SelectivitySorter_t
{
bool IsLess ( const NodeInfo_t & tA, const NodeInfo_t & tB ) const { return tA.m_pQword->m_iDocs < tB.m_pQword->m_iDocs; }
};
struct HitWithQpos_t
{
int m_iNode;
Hitpos_t m_uHit;
WORD m_uQueryPos;
HitWithQpos_t() = default;
HitWithQpos_t ( int iNode, Hitpos_t uHit, WORD uQueryPos );
static bool IsLess ( const HitWithQpos_t & a, const HitWithQpos_t & b ) { return ( a.m_uHit<b.m_uHit ) || ( a.m_uHit==b.m_uHit && a.m_uQueryPos<=b.m_uQueryPos ); }
};
bool m_bFirstChunk {true};
bool m_bCollectHits {false};
CSphVector<NodeInfo_t> m_dNodes;
CSphVector<StoredMultiHit_t> m_dStoredHits;
int m_iNodesSet {0};
RowIdBoundaries_t m_tBoundaries;
CSphString * m_pWarning {nullptr};
CSphQueryStats * m_pStats {nullptr};
int64_t * m_pNanoBudget {nullptr};
CSphFixedVector<uint64_t> m_dWordIds;
CSphQueue<HitWithQpos_t, HitWithQpos_t > m_tQueue;
FORCE_INLINE bool AdvanceQwords();
FORCE_INLINE RowID_t Advance ( int iNode );
FORCE_INLINE RowID_t Advance ( int iNode, RowID_t tRowID );
FORCE_INLINE bool FitsFields ( const NodeInfo_t & tNode ) const;
FORCE_INLINE DWORD GetDocFieldsMask() const;
FORCE_INLINE float GetTFIDF() const;
int GetQword ( NodeInfo_t & tNode, ExtQwordsHash_t & hQwords );
FORCE_INLINE void PushNextHit ( int iNode );
FORCE_INLINE void MergeHitsN ( const StoredMultiHit_t & tStoredHit );
FORCE_INLINE void MergeHits2 ( const StoredMultiHit_t & tStoredHit );
FORCE_INLINE void MergeHits3 ( const StoredMultiHit_t & tStoredHit );
void InitHitMerge ( HitInfo_t & tHitInfo, int iNode, const StoredMultiHit_t & tStoredHit );
void DoHitMerge ( RowID_t tRowID, HitInfo_t & tLeft, HitInfo_t & tRight );
void DoHitMerge ( RowID_t tRowID, HitInfo_t & tHit1, HitInfo_t & tHit2, HitInfo_t & tHit3 );
void CopyHits ( RowID_t tRowID, HitInfo_t & tHitInfo, int iNode );
FORCE_INLINE void AddHit ( RowID_t tRowID, HitInfo_t & tHit, int iNode );
static bool IsHitLess ( const HitInfo_t & tLeft, const HitInfo_t & tRight ) { return tLeft.m_uHitpos<tRight.m_uHitpos || ( tLeft.m_uHitpos==tRight.m_uHitpos && tLeft.m_uQueryPos<=tRight.m_uQueryPos ); }
};
class ExtAndZonespanned_c : public ExtAnd_c
{
friend class ExtAndZonespan_c;
public:
void CollectHits ( const ExtDoc_t * pDocs ) override;
void DebugDump ( int iLevel ) override;
protected:
bool IsSameZonespan ( const ExtHit_t * pHit1, const ExtHit_t * pHit2 ) const;
ISphZoneCheck * m_pZoneChecker = nullptr;
CSphVector<int> m_dZones;
};
class ExtAndZonespan_c : public ExtConditional_T < TERM_POS_NONE, ExtAndZonespanned_c >
{
public:
ExtAndZonespan_c ( ExtNode_i * pFirst, ExtNode_i * pSecond, const ISphQwordSetup & tSetup, const XQNode_t * pNode )
: ExtConditional_T<TERM_POS_NONE,ExtAndZonespanned_c> ( NULL, pNode, tSetup )
{
m_tNode.Init ( pFirst, pSecond );
m_tNode.m_pZoneChecker = tSetup.m_pZoneChecker;
m_tNode.m_dZones = pNode->m_dSpec.m_dZones;
}
};
/// A-or-B streamer
class ExtOr_c : public ExtTwofer_c
{
public:
ExtOr_c ( ExtNode_i * pLeft, ExtNode_i * pRight );
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
void DebugDump ( int iLevel ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
};
/// A-maybe-B streamer
class ExtMaybe_c : public ExtOr_c
{
public:
ExtMaybe_c ( ExtNode_i * pLeft, ExtNode_i * pRight );
const ExtDoc_t * GetDocsChunk() override;
void DebugDump ( int iLevel ) override;
};
/// A-and-not-B streamer
class ExtAndNot_c : public ExtTwofer_c
{
public:
ExtAndNot_c ( ExtNode_i * pLeft, ExtNode_i * pRight );
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
void Reset ( const ISphQwordSetup & tSetup ) override;
void SetCollectHits() override;
void DebugDump ( int iLevel ) override;
protected:
bool m_bPassthrough {false};
};
/// generic operator over N nodes
class ExtNWay_c : public ExtNode_c, public BufferedNode_c
{
public:
ExtNWay_c ( const CSphVector<ExtNode_i *> & dNodes, const ISphQwordSetup & tSetup );
~ExtNWay_c() override;
void Reset ( const ISphQwordSetup & tSetup ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
bool GotHitless () override { return false; }
void HintRowID ( RowID_t tRowID ) override;
uint64_t GetWordID() const override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override { assert(m_pNode); return m_pNode->Estimate(iTotalDocs); }
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override { assert(m_pNode); m_pNode->SetRowidBoundaries(tBoundaries); }
protected:
ExtNode_i * m_pNode {nullptr}; ///< my and-node for all the terms
const ExtDoc_t * m_pDocs {nullptr}; ///< current docs chunk from and-node
const ExtHit_t * m_pHits {nullptr}; ///< current hits chunk from and-node
inline void ConstructNode ( const CSphVector<ExtNode_i *> & dNodes, const CSphVector<WORD> & dPositions, const ISphQwordSetup & tSetup );
};
struct ExtNodeTF_fn
{
bool IsLess ( ExtNode_i * pA, ExtNode_i * pB ) const
{
return pA->GetDocsCount() < pB->GetDocsCount();
}
};
struct ExtNodeTFExt_fn
{
const CSphVector<ExtNode_i *> & m_dNodes;
explicit ExtNodeTFExt_fn ( const CSphVector<ExtNode_i *> & dNodes )
: m_dNodes ( dNodes )
{}
ExtNodeTFExt_fn ( const ExtNodeTFExt_fn & rhs )
: m_dNodes ( rhs.m_dNodes )
{}
bool IsLess ( WORD uA, WORD uB ) const
{
return m_dNodes[uA]->GetDocsCount() < m_dNodes[uB]->GetDocsCount();
}
private:
const ExtNodeTFExt_fn & operator = ( const ExtNodeTFExt_fn & )
{
return *this;
}
};
/// FSM is Finite State Machine
template < class FSM >
class ExtNWay_T : public ExtNWay_c, private FSM
{
public:
ExtNWay_T ( const CSphVector<ExtNode_i *> & dNodes, const XQNode_t & tNode, const ISphQwordSetup & tSetup );
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
void DebugDump ( int iLevel ) override;
};
class FSMphrase_c
{
protected:
struct State_t
{
int m_iTagQword;
DWORD m_uExpHitposWithField;
};
CSphVector<int> m_dQposDelta; ///< next expected qpos delta for each existing qpos (for skipped stopwords case)
CSphVector<int> m_dAtomPos; ///< used as the finite automaton states; references into it are kept
CSphVector<State_t> m_dStates; ///< pointers to states of finite automata
DWORD m_uQposMask {0};
FSMphrase_c ( const CSphVector<ExtNode_i *> & dQwords, const XQNode_t & tNode, const ISphQwordSetup & tSetup );
inline bool HitFSM ( const ExtHit_t* pHit, CSphVector<ExtHit_t> & dHits );
inline void ResetFSM();
static const char * GetName() { return "ExtPhrase"; }
};
/// exact phrase streamer
using ExtPhrase_c = ExtNWay_T<FSMphrase_c>;
/// proximity streamer
class FSMproximity_c
{
protected:
int m_iMaxDistance;
DWORD m_uWordsExpected;
DWORD m_uMinQpos;
DWORD m_uQLen;
DWORD m_uExpPos = 0;
CSphVector<DWORD> m_dProx; // proximity hit position for i-th word
CSphVector<int> m_dDeltas; // used for weight calculation
DWORD m_uWords = 0;
int m_iMinQindex = 65535;
DWORD m_uQposMask = 0;
FSMproximity_c ( const CSphVector<ExtNode_i *> & dQwords, const XQNode_t & tNode, const ISphQwordSetup & tSetup );
inline bool HitFSM ( const ExtHit_t * pHit, CSphVector<ExtHit_t> & dHits );
inline void ResetFSM();
static const char * GetName() { return "ExtProximity"; }
};
/// proximity streamer
using ExtProximity_c = ExtNWay_T<FSMproximity_c>;
/// NEAR streamer
class FSMmultinear_c
{
protected:
int m_iNear; ///< the NEAR distance
DWORD m_uPrelastP = 0;
DWORD m_uPrelastML = 0;
DWORD m_uPrelastSL = 0;
DWORD m_uPrelastW = 0;
DWORD m_uLastP = 0; ///< position of the last hit
DWORD m_uLastML = 0; ///< the length of the previous hit
DWORD m_uLastSL = 0; ///< the length of the previous hit in Query
DWORD m_uLastW = 0; ///< last weight
DWORD m_uWordsExpected; ///< how many hits we expect
DWORD m_uWeight = 0; ///< weight accum
DWORD m_uFirstHit = 0; ///< hitpos of the beginning of the match chain
WORD m_uFirstNpos = 0; ///< N-position of the head of the chain
WORD m_uFirstQpos = 65535; ///< Q-position of the head of the chain (for twofers)
CSphVector<WORD> m_dNpos; ///< query positions for multinear
CSphVector<ExtHit_t> m_dRing; ///< ring buffer for multihit data
int m_iRing = 0; ///< the head of the ring
bool m_bTwofer; ///< if we have 2- or N-way NEAR
bool m_bQposMask;
FSMmultinear_c ( const CSphVector<ExtNode_i *> & dNodes, const XQNode_t & tNode, const ISphQwordSetup & tSetup );
inline bool HitFSM ( const ExtHit_t * pHit, CSphVector<ExtHit_t> & dHits );
inline void ResetFSM();
static const char * GetName() { return "ExtMultinear"; }
private:
inline int RingTail() const;
inline void Add2Ring ( const ExtHit_t * pHit );
inline void ShiftRing();
};
/// multi-NEAR streamer
using ExtMultinear_c = ExtNWay_T<FSMmultinear_c>;
/// quorum streamer
class ExtQuorum_c : public ExtNode_c, public BufferedNode_c
{
friend struct QuorumNodeAtomPos_fn;
public:
ExtQuorum_c ( CSphVector<ExtNode_i*> & dQwords, const XQNode_t & tNode, const ISphQwordSetup & tSetup );
~ExtQuorum_c() override;
void Reset ( const ISphQwordSetup & tSetup ) override;
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
uint64_t GetWordID () const override;
bool GotHitless () override { return false; }
void HintRowID ( RowID_t tRowID ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override;
static int GetThreshold ( const XQNode_t & tNode, int iQwords );
private:
struct TermTuple_t
{
ExtNode_i * m_pTerm; ///< my child node (was simply ExtTerm_c once, not true anymore)
const ExtDoc_t * m_pCurDoc; ///< current positions into children doclists
const ExtHit_t * m_pCurHit; ///< current positions into children hitlists
int m_iCount; ///< terms count in case of dupes
};
CSphVector<TermTuple_t> m_dInitialChildren; ///< my children nodes (simply ExtTerm_c for now)
CSphVector<TermTuple_t> m_dChildren;
int m_iThresh; ///< keyword count threshold
// FIXME!!! also skip hit processing for children w/o constraints ( zones or field limit )
bool m_bHasDupes; ///< should we analyze hits on docs collecting
// check for hits that matches and return flag that docs might be advanced
bool CollectMatchingHits ( RowID_t tRowID, int iQuorum );
int CountQuorum ( bool bFixDupes );
};
/// A-B-C-in-this-order streamer
class ExtOrder_c : public ExtNode_c, public BufferedNode_c
{
public:
ExtOrder_c ( const CSphVector<ExtNode_i *> & dChildren, const ISphQwordSetup & tSetup );
~ExtOrder_c() override;
void Reset ( const ISphQwordSetup & tSetup ) override;
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
bool GotHitless () override { return false; }
uint64_t GetWordID () const override;
void HintRowID ( RowID_t tRowID ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override;
protected:
CSphVector<ExtNode_i *> m_dChildren;
CSphVector<const ExtDoc_t*> m_dChildDocsChunk; ///< last document chunk (for hit fetching)
CSphVector<const ExtDoc_t*> m_dChildDoc; ///< current position in document chunk
CSphVector<const ExtHit_t*> m_dChildHit; ///< current position in hits chunk
bool m_bDone;
int GetChildIdWithNextHit ( RowID_t tRowID ); ///< get next hit within given document, and return its child-id
bool GetMatchingHits ( RowID_t tRowID ); ///< process candidate hits and stores actual matches while we can
};
/// same-text-unit streamer
/// (aka, A and B within same sentence, or same paragraph)
template <bool ROWID_LIMITS>
class ExtUnit_T : public ExtNode_c, public BufferedNode_c
{
public:
ExtUnit_T ( ExtNode_i * pFirst, ExtNode_i * pSecond, const FieldMask_t& dFields, const ISphQwordSetup & tSetup, const char * sUnit );
~ExtUnit_T() override;
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
void Reset ( const ISphQwordSetup & tSetup ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
uint64_t GetWordID () const override;
bool GotHitless () override { return false; }
void HintRowID ( RowID_t tRowID ) override;
void DebugDump ( int iLevel ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override;
protected:
void FilterHits ( const ExtDoc_t * pDoc1, const ExtDoc_t * pDoc2, const ExtHit_t * & pHit1, const ExtHit_t * & pHit2, const ExtHit_t * & pDotHit, DWORD uSentenceEnd, RowID_t tRowID, int & iDoc );
private:
ExtNode_i * m_pArg1 {nullptr}; ///< left arg
ExtNode_i * m_pArg2 {nullptr}; ///< right arg
ExtNode_i * m_pDot {nullptr}; ///< dot positions
const ExtDoc_t * m_pDocs1 {nullptr}; ///< last chunk start
const ExtDoc_t * m_pDocs2 {nullptr}; ///< last chunk start
const ExtDoc_t * m_pDotDocs {nullptr}; ///< last chunk start
const ExtDoc_t * m_pDoc1 {nullptr}; ///< current in-chunk ptr
const ExtDoc_t * m_pDoc2 {nullptr}; ///< current in-chunk ptr
const ExtDoc_t * m_pDotDoc {nullptr}; ///< current in-chunk ptr
const ExtHit_t * m_pHit1 {nullptr}; ///< current in-chunk ptr
const ExtHit_t * m_pHit2 {nullptr}; ///< current in-chunk ptr
const ExtHit_t * m_pDotHit {nullptr}; ///< current in-chunk ptr
// need to keep this between GetDocsChunk() calls,
// as one call of GetDocsChunk() might fetch m_pDotDocs
// while m_pDotHit is only fetched many calls later
bool m_bNeedDotHits = false;
};
class ExtNotNear_c : public ExtTwofer_c, public BufferedNode_c
{
public:
ExtNotNear_c ( ExtNode_i * pMust, ExtNode_i * pNot, int iDist );
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pDocs ) override;
void Reset ( const ISphQwordSetup & tSetup ) override;
void DebugDump ( int iLevel ) override;
private:
const int m_iDist = 1;
CSphString m_sNodeName;
const ExtHit_t * m_pHitL = nullptr;
const ExtHit_t * m_pHitR = nullptr;
bool FilterHits ( RowID_t tRowID, const ExtHit_t * & pMust, const ExtHit_t * & pNot ); // returns true if doc has matched hits
};
//////////////////////////////////////////////////////////////////////////
static ISphQword * CreateQueryWord ( const XQKeyword_t & tWord, const ISphQwordSetup & tSetup, DictRefPtr_c pZonesDict = nullptr )
{
BYTE sTmp [ 3*SPH_MAX_WORD_LEN + 16 ];
strncpy ( (char*)sTmp, tWord.m_sWord.cstr(), sizeof(sTmp) );
sTmp[sizeof(sTmp)-1] = '\0';
ISphQword * pWord = tSetup.QwordSpawn ( tWord );
pWord->m_sWord = tWord.m_sWord;
if (!pZonesDict)
pZonesDict = tSetup.Dict();
pWord->m_uWordID = tWord.m_bMorphed
? pZonesDict->GetWordIDNonStemmed ( sTmp )
: pZonesDict->GetWordID ( sTmp );
pWord->m_sDictWord = (char*)sTmp;
pWord->m_bExpanded = tWord.m_bExpanded;
tSetup.QwordSetup ( pWord );
if ( tWord.m_bFieldStart && tWord.m_bFieldEnd ) pWord->m_iTermPos = TERM_POS_FIELD_STARTEND;
else if ( tWord.m_bFieldStart ) pWord->m_iTermPos = TERM_POS_FIELD_START;
else if ( tWord.m_bFieldEnd ) pWord->m_iTermPos = TERM_POS_FIELD_END;
else pWord->m_iTermPos = TERM_POS_NONE;
pWord->m_fBoost = tWord.m_fBoost;
pWord->m_iAtomPos = tWord.m_iAtomPos;
return pWord;
}
struct AtomPosQWord_fn
{
bool operator () ( ISphQword * ) const { return true; }
};
struct AtomPosExtNode_fn
{
bool operator () ( ExtNode_i * pNode ) const { return !pNode->GotHitless(); }
};
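// count the number of distinct atom (query) positions among the given nodes,
// considering only the nodes accepted by fnCheck; implemented as a bitvector
// over the [min..max] atom position range.
// e.g. several expansions of the same keyword all share one atom position and
// therefore count as 1, so a phrase node over them alone can not be built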
template <typename T, typename NODE_CHECK>
int CountAtomPos ( const CSphVector<T *> & dNodes, const NODE_CHECK & fnCheck )
{
if ( dNodes.GetLength()<2 )
return dNodes.GetLength();
int iMinPos = INT_MAX;
int iMaxPos = 0;
ARRAY_FOREACH ( i, dNodes )
{
T * pNode = dNodes[i];
if ( fnCheck ( pNode ) )
{
iMinPos = Min ( pNode->GetAtomPos(), iMinPos );
iMaxPos = Max ( pNode->GetAtomPos(), iMaxPos );
}
}
if ( iMinPos==INT_MAX )
return 0;
CSphBitvec dAtomPos ( iMaxPos - iMinPos + 1 );
ARRAY_FOREACH ( i, dNodes )
{
if ( fnCheck ( dNodes[i] ) )
dAtomPos.BitSet ( dNodes[i]->GetAtomPos() - iMinPos );
}
return dAtomPos.BitCount();
}
template < typename T >
static ExtNode_i * CreateMultiNode ( const XQNode_t * pQueryNode, const ISphQwordSetup & tSetup, bool bNeedsHitlist, bool bUseBM25, const RowIdBoundaries_t * pBoundaries )
{
///////////////////////////////////
// virtually plain (expanded) node
///////////////////////////////////
assert ( pQueryNode );
if ( pQueryNode->m_dChildren.GetLength() )
{
CSphVector<ExtNode_i *> dNodes;
CSphVector<ExtNode_i *> dTerms;
ARRAY_FOREACH ( i, pQueryNode->m_dChildren )
{
ExtNode_i * pTerm = ExtNode_i::Create ( pQueryNode->m_dChildren[i], tSetup, bUseBM25, pBoundaries );
assert ( !pTerm || pTerm->GetAtomPos()>=0 );
if ( pTerm )
{
if ( !pTerm->GotHitless() )
dNodes.Add ( pTerm );
else
dTerms.Add ( pTerm );
}
}
int iAtoms = CountAtomPos ( dNodes, AtomPosExtNode_fn() );
if ( iAtoms<2 )
{
ARRAY_FOREACH ( i, dNodes )
SafeDelete ( dNodes[i] );
ARRAY_FOREACH ( i, dTerms )
SafeDelete ( dTerms[i] );
if ( tSetup.m_pWarning )
tSetup.m_pWarning->SetSprintf ( "can't create phrase node, hitlists unavailable (hitlists=%d, nodes=%d)", iAtoms, pQueryNode->m_dChildren.GetLength() );
return NULL;
}
// FIXME! tricky combo again
// quorum+expand used KeywordsEqual() path to drill down until actual nodes
ExtNode_i * pResult = new T ( dNodes, *pQueryNode, tSetup );
// AND result with the words that had no hitlist
if ( dTerms.GetLength () )
{
pResult = new ExtAnd_c ( pResult, dTerms[0] );
for ( int i=1; i<dTerms.GetLength (); i++ )
pResult = new ExtAnd_c ( pResult, dTerms[i] );
}
if ( pQueryNode->GetCount() )
return tSetup.m_pNodeCache->CreateProxy ( pResult, pQueryNode, tSetup );
return pResult;
}
//////////////////////
// regular plain node
//////////////////////
ExtNode_i * pResult = NULL;
CSphVector<ISphQword *> dQwordsHit; // have hits
CSphVector<ISphQword *> dQwords; // don't have hits
// partition phrase words
const CSphVector<XQKeyword_t> & dWords = pQueryNode->m_dWords;
ARRAY_FOREACH ( i, dWords )
{
ISphQword * pWord = CreateQueryWord ( dWords[i], tSetup );
if ( pWord->m_bHasHitlist || !bNeedsHitlist )
dQwordsHit.Add ( pWord );
else
dQwords.Add ( pWord );
}
// see if we can create the node
int iAtoms = CountAtomPos ( dQwordsHit, AtomPosQWord_fn() );
if ( iAtoms<2 )
{
ARRAY_FOREACH ( i, dQwords )
SafeDelete ( dQwords[i] );
ARRAY_FOREACH ( i, dQwordsHit )
SafeDelete ( dQwordsHit[i] );
if ( tSetup.m_pWarning )
tSetup.m_pWarning->SetSprintf ( "can't create phrase node, hitlists unavailable (hitlists=%d, nodes=%d)",
iAtoms, dWords.GetLength() );
return NULL;
} else
{
// at least two words have hitlists, creating phrase node
assert ( pQueryNode->m_dWords.GetLength() );
assert ( pQueryNode->GetOp()==SPH_QUERY_PHRASE || pQueryNode->GetOp()==SPH_QUERY_PROXIMITY || pQueryNode->GetOp()==SPH_QUERY_QUORUM );
// create nodes
CSphVector<ExtNode_i *> dNodes;
ARRAY_FOREACH ( i, dQwordsHit )
{
dNodes.Add ( ExtNode_i::Create ( dQwordsHit[i], pQueryNode, tSetup, bUseBM25, pBoundaries ) );
dNodes.Last()->SetAtomPos ( dQwordsHit[i]->m_iAtomPos );
}
pResult = new T ( dNodes, *pQueryNode, tSetup );
}
// AND result with the words that had no hitlist
if ( dQwords.GetLength() )
{
ExtNode_i * pNode = ExtNode_i::Create ( dQwords[0], pQueryNode, tSetup, bUseBM25, pBoundaries );
for ( int i=1; i<dQwords.GetLength(); i++ )
pNode = new ExtAnd_c ( pNode, ExtNode_i::Create ( dQwords[i], pQueryNode, tSetup, bUseBM25, pBoundaries ) );
pResult = new ExtAnd_c ( pResult, pNode );
}
if ( pQueryNode->GetCount() )
return tSetup.m_pNodeCache->CreateProxy ( pResult, pQueryNode, tSetup );
return pResult;
}
static ExtNode_i * CreateOrderNode ( const XQNode_t * pNode, const ISphQwordSetup & tSetup, bool bUseBM25, const RowIdBoundaries_t * pBoundaries )
{
if ( pNode->m_dChildren.GetLength()<2 )
{
if ( tSetup.m_pWarning )
tSetup.m_pWarning->SetSprintf ( "order node requires at least two children" );
return NULL;
}
CSphVector<ExtNode_i *> dChildren;
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
ExtNode_i * pChild = ExtNode_i::Create ( pNode->m_dChildren[i], tSetup, bUseBM25, pBoundaries );
if ( !pChild || pChild->GotHitless() )
{
if ( tSetup.m_pWarning )
tSetup.m_pWarning->SetSprintf ( "failed to create order node, hitlist unavailable" );
ARRAY_FOREACH ( j, dChildren )
SafeDelete ( dChildren[j] );
return NULL;
}
dChildren.Add ( pChild );
}
ExtNode_i * pResult = new ExtOrder_c ( dChildren, tSetup );
if ( pNode->GetCount() )
return tSetup.m_pNodeCache->CreateProxy ( pResult, pNode, tSetup );
return pResult;
}
ExtNode_i * ExtNode_i::Create ( const XQKeyword_t & tWord, const XQNode_t * pNode, const ISphQwordSetup & tSetup, bool bUseBM25, bool bRowidLimits )
{
return Create ( CreateQueryWord ( tWord, tSetup ), pNode, tSetup, bUseBM25, bRowidLimits );
}
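// runtime-to-compile-time dispatch helper: three runtime flags are packed into
// a 3-bit switch value (bUseBM25 -> bit 2, rowid limits -> bit 1, stats -> bit 0)
// which selects one of the 8 template instantiations.
// e.g. bUseBM25=true, bRowidLimits=false, stats=true gives iSwitch==5 and
// ExtTermPos_T<TERMPOS, true, false, true>. the same bit-packing trick is used
// by CreateHitlessNode(), CreateTermNode(), CreateMultiAndNode() and
// CreatePayloadNode() below (the exact flag sets vary)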
template <TermPosFilter_e TERMPOS>
static ExtNode_i * CreateTermposNode ( ISphQword * pQword, const XQNode_t * pNode, const ISphQwordSetup & tSetup, bool bUseBM25, bool bRowidLimits )
{
int iSwitch = 4*(bUseBM25?1:0) + 2*(bRowidLimits?1:0) + (tSetup.m_pStats?1:0);
switch ( iSwitch )
{
case 0: return new ExtTermPos_T<TERMPOS, false, false, false> ( pQword, pNode, tSetup );
case 1: return new ExtTermPos_T<TERMPOS, false, false, true> ( pQword, pNode, tSetup );
case 2: return new ExtTermPos_T<TERMPOS, false, true, false> ( pQword, pNode, tSetup );
case 3: return new ExtTermPos_T<TERMPOS, false, true, true> ( pQword, pNode, tSetup );
case 4: return new ExtTermPos_T<TERMPOS, true, false, false> ( pQword, pNode, tSetup );
case 5: return new ExtTermPos_T<TERMPOS, true, false, true> ( pQword, pNode, tSetup );
case 6: return new ExtTermPos_T<TERMPOS, true, true, false> ( pQword, pNode, tSetup );
case 7: return new ExtTermPos_T<TERMPOS, true, true, true> ( pQword, pNode, tSetup );
default:
assert ( 0 && "Internal error" );
return nullptr;
}
}
static ExtNode_i * CreateHitlessNode ( ISphQword * pQword, const FieldMask_t & tFieldMask, const ISphQwordSetup & tSetup, bool bNotWeighted, bool bUseBM25, bool bRowidLimits )
{
int iSwitch = 4*(bUseBM25?1:0) + 2*(bRowidLimits?1:0) + (tSetup.m_pStats?1:0);
switch ( iSwitch )
{
case 0: return new ExtTermHitless_T<false, false, false>( pQword, tFieldMask, tSetup, bNotWeighted );
case 1: return new ExtTermHitless_T<false, false, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 2: return new ExtTermHitless_T<false, true, false>( pQword, tFieldMask, tSetup, bNotWeighted );
case 3: return new ExtTermHitless_T<false, true, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 4: return new ExtTermHitless_T<true, false, false>( pQword, tFieldMask, tSetup, bNotWeighted );
case 5: return new ExtTermHitless_T<true, false, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 6: return new ExtTermHitless_T<true, true, false>( pQword, tFieldMask, tSetup, bNotWeighted );
case 7: return new ExtTermHitless_T<true, true, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
default:
assert ( 0 && "Internal error" );
return nullptr;
}
}
static ExtNode_i * CreateTermNode ( ISphQword * pQword, const FieldMask_t & tFieldMask, const ISphQwordSetup & tSetup, bool bNotWeighted, bool bUseBM25, bool bRowidLimits )
{
int iSwitch = 4*(bUseBM25?1:0) + 2*(bRowidLimits?1:0) + (tSetup.m_pStats?1:0);
switch ( iSwitch )
{
case 0: return new ExtTerm_T<false, false, false> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 1: return new ExtTerm_T<false, false, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 2: return new ExtTerm_T<false, true, false> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 3: return new ExtTerm_T<false, true, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 4: return new ExtTerm_T<true, false, false> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 5: return new ExtTerm_T<true, false, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 6: return new ExtTerm_T<true, true, false> ( pQword, tFieldMask, tSetup, bNotWeighted );
case 7: return new ExtTerm_T<true, true, true> ( pQword, tFieldMask, tSetup, bNotWeighted );
default:
assert ( 0 && "Internal error" );
return nullptr;
}
}
static ExtNode_i * CreateTermNode ( ISphQword * pQword, const ISphQwordSetup & tSetup, bool bUseBM25, bool bRowidLimits )
{
int iSwitch = 4*(bUseBM25?1:0) + 2*(bRowidLimits?1:0) + (tSetup.m_pStats?1:0);
switch ( iSwitch )
{
case 0: return new ExtTerm_T<false, false, false> ( pQword, tSetup );
case 1: return new ExtTerm_T<false, false, true> ( pQword, tSetup );
case 2: return new ExtTerm_T<false, true, false> ( pQword, tSetup );
case 3: return new ExtTerm_T<false, true, true> ( pQword, tSetup );
case 4: return new ExtTerm_T<true, false, false> ( pQword, tSetup );
case 5: return new ExtTerm_T<true, false, true> ( pQword, tSetup );
case 6: return new ExtTerm_T<true, true, false> ( pQword, tSetup );
case 7: return new ExtTerm_T<true, true, true> ( pQword, tSetup );
default:
assert ( 0 && "Internal error" );
return nullptr;
}
}
static ExtNode_i * CreateMultiAndNode ( const VecTraits_T<XQNode_t*> & dXQNodes, const ISphQwordSetup & tSetup, bool bUseBM25, bool bRowidLimits )
{
bool bNeedFieldSpec = false;
for ( const auto & i : dXQNodes )
bNeedFieldSpec |= !i->m_dSpec.m_dFieldMask.TestAll(true);
int iSwitch = 4*(bUseBM25?1:0) + 2*(bNeedFieldSpec?1:0) + (bRowidLimits?1:0);
switch ( iSwitch )
{
case 0: return new ExtMultiAnd_T<false, false, false> ( dXQNodes, tSetup );
case 1: return new ExtMultiAnd_T<false, false, true> ( dXQNodes, tSetup );
case 2: return new ExtMultiAnd_T<false, true, false> ( dXQNodes, tSetup );
case 3: return new ExtMultiAnd_T<false, true, true> ( dXQNodes, tSetup );
case 4: return new ExtMultiAnd_T<true, false, false> ( dXQNodes, tSetup );
case 5: return new ExtMultiAnd_T<true, false, true> ( dXQNodes, tSetup );
case 6: return new ExtMultiAnd_T<true, true, false> ( dXQNodes, tSetup );
case 7: return new ExtMultiAnd_T<true, true, true> ( dXQNodes, tSetup );
default:
assert ( 0 && "Internal error" );
return nullptr;
}
}
ExtNode_i * ExtNode_i::Create ( ISphQword * pQword, const XQNode_t * pNode, const ISphQwordSetup & tSetup, bool bUseBM25, bool bRowidLimits )
{
assert ( pQword );
if ( pNode->m_dSpec.m_iFieldMaxPos )
pQword->m_iTermPos = TERM_POS_FIELD_LIMIT;
if ( pNode->m_dSpec.m_dZones.GetLength() )
pQword->m_iTermPos = TERM_POS_ZONES;
if ( !pQword->m_bHasHitlist )
{
if ( tSetup.m_pWarning && pQword->m_iTermPos!=TERM_POS_NONE )
tSetup.m_pWarning->SetSprintf ( "hitlist unavailable, position limit ignored" );
return CreateHitlessNode ( pQword, pNode->m_dSpec.m_dFieldMask, tSetup, pNode->m_bNotWeighted, bUseBM25, bRowidLimits );
}
switch ( pQword->m_iTermPos )
{
case TERM_POS_FIELD_STARTEND: return CreateTermposNode<TERM_POS_FIELD_STARTEND> ( pQword, pNode, tSetup, bUseBM25, bRowidLimits );
case TERM_POS_FIELD_START: return CreateTermposNode<TERM_POS_FIELD_START> ( pQword, pNode, tSetup, bUseBM25, bRowidLimits );
case TERM_POS_FIELD_END: return CreateTermposNode<TERM_POS_FIELD_END> ( pQword, pNode, tSetup, bUseBM25, bRowidLimits );
case TERM_POS_FIELD_LIMIT: return CreateTermposNode<TERM_POS_FIELD_LIMIT> ( pQword, pNode, tSetup, bUseBM25, bRowidLimits );
case TERM_POS_ZONES: return CreateTermposNode<TERM_POS_ZONES> ( pQword, pNode, tSetup, bUseBM25, bRowidLimits );
default: return CreateTermNode ( pQword, pNode->m_dSpec.m_dFieldMask, tSetup, pNode->m_bNotWeighted, bUseBM25, bRowidLimits );
}
}
ExtNode_i * ExtNode_i::Create ( const XQKeyword_t & tWord, const ISphQwordSetup & tSetup, DictRefPtr_c pZonesDict, bool bUseBM25, bool bRowidLimits )
{
return CreateTermNode ( CreateQueryWord ( tWord, tSetup, std::move (pZonesDict) ), tSetup, bUseBM25, bRowidLimits );
}
//////////////////////////////////////////////////////////////////////////
ExtNode_c::ExtNode_c ( int64_t tmTimeout )
: m_iMaxTimer { tmTimeout }
{
m_dDocs[0].m_tRowID = INVALID_ROWID;
m_dHits.Reserve ( MAX_BLOCK_DOCS );
}
void ExtNode_c::DebugDump ( int iLevel )
{
DebugIndent ( iLevel );
printf ( "ExtNode\n" );
}
void ExtNode_c::SetAtomPos ( int iPos )
{
m_iAtomPos = iPos;
}
int ExtNode_c::GetAtomPos() const
{
return m_iAtomPos;
}
void ExtNode_c::SetQPosReverse ()
{
m_bQPosReverse = true;
}
void ExtNode_c::SetMaxTimeout ( int64_t iTimer )
{
m_iMaxTimer = iTimer;
}
bool ExtNode_c::TimeExceeded() const
{
return sph::TimeExceeded ( m_iMaxTimer );
}
int64_t ExtNode_c::GetMaxTimeout() const
{
return m_iMaxTimer;
}
const ExtHit_t * ExtNode_c::GetHits ( const ExtDoc_t * pDocs )
{
m_dHits.Resize(0);
CollectHits ( pDocs );
return ReturnHits ( m_bQPosReverse );
}
inline const ExtDoc_t * ExtNode_c::ReturnDocsChunk ( int iCount, const char * sNode, const char * sTerm )
{
assert ( iCount>=0 && iCount<MAX_BLOCK_DOCS );
m_dDocs[iCount].m_tRowID = INVALID_ROWID;
PrintDocsChunk ( iCount, m_iAtomPos, m_dDocs, sNode, this, sTerm );
return iCount ? m_dDocs : nullptr;
}
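// debug-build invariant checked below: returned hits must be ordered by rowid,
// then by hit position within a rowid; for equal positions the querypos order
// depends on bReverse (ascending normally, descending for reversed-qpos nodes)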
inline const ExtHit_t * ExtNode_c::ReturnHits ( bool bReverse )
{
int iCount = m_dHits.GetLength();
m_dHits.Add().m_tRowID = INVALID_ROWID;
#ifndef NDEBUG
for ( int i=1; i<iCount; i++ )
{
bool bQPosPassed = ( ( bReverse && m_dHits[i-1].m_uQuerypos>=m_dHits[i].m_uQuerypos ) || ( !bReverse && m_dHits[i-1].m_uQuerypos<=m_dHits[i].m_uQuerypos ) );
assert ( m_dHits[i-1].m_tRowID!=m_dHits[i].m_tRowID ||
( m_dHits[i-1].m_uHitpos<m_dHits[i].m_uHitpos || ( m_dHits[i-1].m_uHitpos==m_dHits[i].m_uHitpos && bQPosPassed ) ) );
assert ( m_dHits[i-1].m_tRowID <= m_dHits[i].m_tRowID );
}
#endif
PrintHitsChunk ( iCount, m_iAtomPos, iCount ? m_dHits.Begin() : nullptr, this );
return m_dHits.Begin();
}
inline const ExtHit_t * ExtNode_c::ReturnHitsChunk ( int iCount, const char * sNode, bool bReverse )
{
assert ( iCount>=0 && iCount<MAX_HITS );
m_dHits[iCount].m_tRowID = INVALID_ROWID;
#ifndef NDEBUG
for ( int i=1; i<iCount; i++ )
{
bool bQPosPassed = ( ( bReverse && m_dHits[i-1].m_uQuerypos>=m_dHits[i].m_uQuerypos ) || ( !bReverse && m_dHits[i-1].m_uQuerypos<=m_dHits[i].m_uQuerypos ) );
assert ( m_dHits[i-1].m_tRowID!=m_dHits[i].m_tRowID ||
( m_dHits[i-1].m_uHitpos<m_dHits[i].m_uHitpos || ( m_dHits[i-1].m_uHitpos==m_dHits[i].m_uHitpos && bQPosPassed ) ) );
}
#endif
const ExtHit_t * pHits = iCount ? m_dHits.Begin() : nullptr;
PrintHitsChunk ( iCount, m_iAtomPos, pHits, this );
return pHits;
}
//////////////////////////////////////////////////////////////////////////
struct ExtPayloadEntry_t
{
RowID_t m_tRowID;
Hitpos_t m_uHitpos;
bool operator < ( const ExtPayloadEntry_t & rhs ) const
{
if ( m_tRowID!=rhs.m_tRowID )
return ( m_tRowID<rhs.m_tRowID );
return ( m_uHitpos<rhs.m_uHitpos );
}
};
struct ExtPayloadKeyword_t : public XQKeyword_t
{
CSphString m_sDictWord;
SphWordID_t m_uWordID;
float m_fIDF;
int m_iDocs;
int m_iHits;
};
/// simple in-memory multi-term cache
template <bool ROWID_LIMITS>
class ExtPayloadBase_T : public ExtNode_c
{
public:
ExtPayloadBase_T ( const XQNode_t * pNode, const ISphQwordSetup & tSetup, const RowIdBoundaries_t * pBoundaries );
void Reset ( const ISphQwordSetup & tSetup ) override;
void HintRowID ( RowID_t ) override {} // FIXME!!! implement with tree
void CollectHits ( const ExtDoc_t * pDocs ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t &, CSphVector<TermPos_t> & ) const override;
bool GotHitless () override { return false; }
int GetDocsCount() const override { return m_tWord.m_iDocs; }
uint64_t GetWordID () const override { return m_tWord.m_uWordID; }
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override {}
protected:
CSphVector<ExtPayloadEntry_t> m_dCache;
ExtPayloadKeyword_t m_tWord;
int m_iCurDocsEnd; ///< end of the last docs chunk returned, exclusive, i.e. [begin,end)
int m_iCurHit; ///< end of the last hits chunk (within the last docs chunk) returned, exclusive
CSphString * m_pWarning;
private:
FieldMask_t m_dFieldMask;
RowIdBoundaries_t m_tBoundaries;
void PopulateCache ( const ISphQwordSetup & tSetup, bool bFillStat );
void FetchHits ( ISphQword * pQword, bool bFillStat );
void FilterHits();
};
template<bool USE_BM25, bool ROWID_LIMITS>
class ExtPayload_T : public ExtPayloadBase_T<ROWID_LIMITS>
{
using BASE = ExtPayloadBase_T<ROWID_LIMITS>;
public:
ExtPayload_T ( const XQNode_t * pNode, const ISphQwordSetup & tSetup, const RowIdBoundaries_t * pBoundaries ) : BASE ( pNode, tSetup, pBoundaries ) {}
const ExtDoc_t * GetDocsChunk() final;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const final { return { 0.0f, 0, 0 }; }
};
template<bool ROWID_LIMITS>
ExtPayloadBase_T<ROWID_LIMITS>::ExtPayloadBase_T ( const XQNode_t * pNode, const ISphQwordSetup & tSetup, const RowIdBoundaries_t * pBoundaries )
: ExtNode_c { tSetup.m_iMaxTimer }
{
// sanity checks
// this node must only be created for a huge OR of tiny expansions
assert ( pNode->m_dWords.GetLength()==1 );
assert ( pNode->m_dWords.Begin()->m_pPayload );
assert ( pNode->m_dSpec.m_dZones.GetLength()==0 && !pNode->m_dSpec.m_bZoneSpan );
(XQKeyword_t &)m_tWord = *pNode->m_dWords.Begin();
m_dFieldMask = pNode->m_dSpec.m_dFieldMask;
m_iAtomPos = m_tWord.m_iAtomPos;
BYTE sTmpWord [ 3*SPH_MAX_WORD_LEN + 4 ];
// our little stemming buffer (morphology aware dictionary might need to change the keyword)
strncpy ( (char*)sTmpWord, m_tWord.m_sWord.cstr(), sizeof(sTmpWord) );
sTmpWord[sizeof(sTmpWord)-1] = '\0';
// setup keyword disk reader
m_tWord.m_uWordID = tSetup.m_pDict->GetWordID ( sTmpWord );
m_tWord.m_sDictWord = (const char *)sTmpWord;
m_tWord.m_fIDF = -1.0f;
m_tWord.m_iDocs = 0;
m_tWord.m_iHits = 0;
m_pWarning = tSetup.m_pWarning;
SetMaxTimeout ( tSetup.m_iMaxTimer );
if ( pBoundaries )
m_tBoundaries = *pBoundaries;
PopulateCache ( tSetup, true );
}
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::FetchHits ( ISphQword * pQword, bool bFillStat )
{
m_dCache.Reserve ( Max ( pQword->m_iHits, pQword->m_iDocs ) );
while (true)
{
const CSphMatch & tMatch = pQword->GetNextDoc();
if ( tMatch.m_tRowID==INVALID_ROWID )
break;
pQword->SeekHitlist ( pQword->m_iHitlistPos );
for ( Hitpos_t uHit = pQword->GetNextHit(); uHit!=EMPTY_HIT; uHit = pQword->GetNextHit() )
{
// apply field limits
if ( !m_dFieldMask.Test ( HITMAN::GetField(uHit) ) )
continue;
if constexpr ( ROWID_LIMITS )
{
if ( !bFillStat && ( tMatch.m_tRowID < m_tBoundaries.m_tMinRowID || tMatch.m_tRowID > m_tBoundaries.m_tMaxRowID ) )
continue;
}
// FIXME!!! apply zone limits too
// apply field-start/field-end modifiers
if ( m_tWord.m_bFieldStart && HITMAN::GetPos(uHit)!=1 )
continue;
if ( m_tWord.m_bFieldEnd && !HITMAN::IsEnd(uHit) )
continue;
// ok, this hit works, copy it
ExtPayloadEntry_t & tEntry = m_dCache.Add();
tEntry.m_tRowID = tMatch.m_tRowID;
tEntry.m_uHitpos = uHit;
}
}
m_dCache.Sort();
}
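// trim the sorted hit cache down to the [m_tMinRowID..m_tMaxRowID] pseudo-shard
// window: binary searches (lower_bound/upper_bound) find the cut points, then
// the head and/or tail of the cache are removed in place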
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::FilterHits()
{
auto * pFront = m_dCache.Begin();
auto * pBack = (m_dCache.End()-1);
if ( pBack->m_tRowID < m_tBoundaries.m_tMinRowID || pFront->m_tRowID > m_tBoundaries.m_tMaxRowID )
m_dCache.Resize(0);
else
{
bool bCutFront = pFront->m_tRowID < m_tBoundaries.m_tMinRowID;
bool bCutBack = pBack->m_tRowID > m_tBoundaries.m_tMaxRowID;
if ( bCutFront || bCutBack )
{
auto * pPtr = pFront;
auto * pEnd = pBack+1;
int iCutFront = 0;
if ( bCutFront )
{
pPtr = std::lower_bound ( pPtr, pEnd, m_tBoundaries.m_tMinRowID, []( auto & tEntry, RowID_t tValue ){ return tEntry.m_tRowID < tValue; } );
iCutFront = pPtr-pFront;
}
if ( bCutBack )
{
pEnd = std::upper_bound ( pPtr, pEnd, m_tBoundaries.m_tMaxRowID, []( RowID_t tValue, auto & tEntry ){ return tValue < tEntry.m_tRowID; } );
m_dCache.Resize ( pEnd-pFront );
}
m_dCache.Remove ( 0, iCutFront );
}
}
}
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::PopulateCache ( const ISphQwordSetup & tSetup, bool bFillStat )
{
std::unique_ptr<ISphQword> pQword = std::unique_ptr<ISphQword>(tSetup.QwordSpawn(m_tWord));
pQword->m_sWord = m_tWord.m_sWord;
pQword->m_uWordID = m_tWord.m_uWordID;
pQword->m_sDictWord = m_tWord.m_sDictWord;
pQword->m_bExpanded = true;
bool bOk = tSetup.QwordSetup ( pQword.get() );
// setup keyword idf and stats
if ( bFillStat )
{
m_tWord.m_iDocs = pQword->m_iDocs;
m_tWord.m_iHits = pQword->m_iHits;
}
// read and cache all docs and hits
if ( bOk )
FetchHits ( pQword.get(), bFillStat );
if ( bFillStat && m_dCache.GetLength() )
{
// there might be duplicate documents, but not hits; let's recalculate the docs count
// FIXME!!! that does not work for RT indexes - get rid of ExtPayloadBase_T and move PopulateCache code to index-specific QWord
// FIXME! if we have pseudo_sharding=1, we read all hits just to calculate the correct number of docs (instead of early filtering by rowid)
// because of this we also can't hint individual qwords to the start of rowid boundaries
RowID_t tLastRowID = m_dCache.Begin()->m_tRowID;
const ExtPayloadEntry_t * pCur = m_dCache.Begin() + 1;
const ExtPayloadEntry_t * pEnd = m_dCache.Begin() + m_dCache.GetLength();
int iDocsTotal = 1;
while ( pCur!=pEnd )
{
iDocsTotal += ( tLastRowID!=pCur->m_tRowID );
tLastRowID = pCur->m_tRowID;
pCur++;
}
m_tWord.m_iDocs = iDocsTotal;
// remove hits that don't belong to our pseudo shard
// we could do this earlier, but we need to collect hits to calculate the correct number of docs (affects weight calc)
if constexpr ( ROWID_LIMITS )
FilterHits();
}
m_iCurDocsEnd = 0;
m_iCurHit = 0;
}
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::Reset ( const ISphQwordSetup & tSetup )
{
SetMaxTimeout ( tSetup.m_iMaxTimer );
m_dCache.Resize ( 0 );
PopulateCache ( tSetup, false );
}
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::CollectHits ( const ExtDoc_t * pDocs )
{
int iCurHit = m_iCurHit;
const int iCurDocsEnd = m_iCurDocsEnd;
while ( HasDocs(pDocs) && iCurHit<iCurDocsEnd )
{
// skip rejected documents
while ( iCurHit<iCurDocsEnd && m_dCache[iCurHit].m_tRowID<pDocs->m_tRowID )
iCurHit++;
if ( iCurHit>=iCurDocsEnd )
break;
// skip non-matching documents
RowID_t tRowID = m_dCache[iCurHit].m_tRowID;
if ( pDocs->m_tRowID<tRowID )
{
while ( pDocs->m_tRowID<tRowID )
pDocs++;
if ( pDocs->m_tRowID!=tRowID )
continue;
}
// copy accepted documents
while ( iCurHit<iCurDocsEnd && m_dCache[iCurHit].m_tRowID==pDocs->m_tRowID )
{
ExtHit_t & tHit = m_dHits.Add();
tHit.m_tRowID = m_dCache[iCurHit].m_tRowID;
tHit.m_uHitpos = m_dCache[iCurHit].m_uHitpos;
tHit.m_uQuerypos = (WORD) m_tWord.m_iAtomPos;
tHit.m_uWeight = tHit.m_uMatchlen = tHit.m_uSpanlen = 1;
iCurHit++;
}
}
m_iCurHit = iCurHit;
}
template<bool ROWID_LIMITS>
int ExtPayloadBase_T<ROWID_LIMITS>::GetQwords ( ExtQwordsHash_t & hQwords )
{
int iMax = -1;
ExtQword_t tQword;
tQword.m_sWord = m_tWord.m_sWord;
tQword.m_sDictWord = m_tWord.m_sDictWord;
tQword.m_iDocs = m_tWord.m_iDocs;
tQword.m_iHits = m_tWord.m_iHits;
tQword.m_fIDF = -1.0f;
tQword.m_fBoost = m_tWord.m_fBoost;
tQword.m_iQueryPos = m_tWord.m_iAtomPos;
tQword.m_bExpanded = true;
tQword.m_bExcluded = m_tWord.m_bExcluded;
hQwords.Add ( tQword, m_tWord.m_sWord );
if ( !m_tWord.m_bExcluded )
iMax = Max ( iMax, m_tWord.m_iAtomPos );
return iMax;
}
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
// pull idfs
if ( m_tWord.m_fIDF<0.0f )
{
assert ( hQwords ( m_tWord.m_sWord ) );
m_tWord.m_fIDF = hQwords ( m_tWord.m_sWord )->m_fIDF;
}
}
template<bool ROWID_LIMITS>
void ExtPayloadBase_T<ROWID_LIMITS>::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
if ( m_tWord.m_bExcluded )
return;
ExtQword_t & tQword = hQwords[ m_tWord.m_sWord ];
TermPos_t & tPos = dTermDupes.Add();
tPos.m_uAtomPos = (WORD)m_tWord.m_iAtomPos;
tPos.m_uQueryPos = (WORD)tQword.m_iQueryPos;
}
//////////////////////////////////////////////////////////////////////////
template <bool USE_BM25, bool ROWID_LIMITS>
const ExtDoc_t * ExtPayload_T<USE_BM25,ROWID_LIMITS>::GetDocsChunk()
{
// max_query_time
if ( BASE::TimeExceeded() )
{
if ( BASE::m_pWarning )
*BASE::m_pWarning = "query time exceeded max_query_time";
return nullptr;
}
// interrupted by SIGTERM
if ( sph::TimeExceeded ( BASE::m_iCheckTimePoint ) )
{
if ( g_bInterruptNow )
{
if ( BASE::m_pWarning )
*BASE::m_pWarning = "Server shutdown in progress";
return nullptr;
}
if ( session::GetKilled() )
{
if ( BASE::m_pWarning )
*BASE::m_pWarning = "query was killed";
return nullptr;
}
Threads::Coro::RescheduleAndKeepCrashQuery();
}
int iDoc = 0;
int iEnd = BASE::m_iCurDocsEnd;
while ( iDoc<MAX_BLOCK_DOCS-1 && iEnd<BASE::m_dCache.GetLength() )
{
RowID_t tRowID = BASE::m_dCache[iEnd].m_tRowID;
ExtDoc_t & tDoc = BASE::m_dDocs[iDoc++];
tDoc.m_tRowID = tRowID;
tDoc.m_uDocFields = 0;
int iHitStart = iEnd;
while ( iEnd<BASE::m_dCache.GetLength() && BASE::m_dCache[iEnd].m_tRowID==tRowID )
{
tDoc.m_uDocFields |= 1<< ( HITMAN::GetField ( BASE::m_dCache[iEnd].m_uHitpos ) );
iEnd++;
}
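// classic BM25 term-frequency saturation, tf/(tf+k1), scaled by the keyword
// IDF; no field-length normalization is applied at this level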
if constexpr ( USE_BM25 )
{
int iHits = iEnd - iHitStart;
tDoc.m_fTFIDF = float(iHits) / float(SPH_BM25_K1+iHits) * BASE::m_tWord.m_fIDF;
}
}
BASE::m_iCurDocsEnd = iEnd;
return BASE::ReturnDocsChunk ( iDoc, "payload", BASE::m_tWord.m_sDictWord.cstr() );
}
//////////////////////////////////////////////////////////////////////////
static ExtNode_i * CreatePayloadNode ( const XQNode_t * pNode, const ISphQwordSetup & tSetup, bool bUseBM25, const RowIdBoundaries_t * pBoundaries )
{
int iSwitch = 2*(bUseBM25?1:0) + (pBoundaries?1:0);
switch ( iSwitch )
{
case 0: return new ExtPayload_T<false, false> ( pNode, tSetup, pBoundaries );
case 1: return new ExtPayload_T<false, true> ( pNode, tSetup, pBoundaries );
case 2: return new ExtPayload_T<true, false> ( pNode, tSetup, pBoundaries );
case 3: return new ExtPayload_T<true, true> ( pNode, tSetup, pBoundaries );
default:
assert ( 0 && "Internal error" );
return nullptr;
}
}
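// the main eval-tree factory. in short:
//  - SCAN -> hitless scan node
//  - single expanded keyword with a payload -> ExtPayload_T cache node
//  - PHRASE/PROXIMITY/NEAR/QUORUM -> CreateMultiNode() (quorum may degrade to
//    a plain AND/OR chain when the threshold or word count is out of range)
//  - BEFORE -> CreateOrderNode()
//  - AND over single-keyword children -> frequency-sorted AND chain, or a
//    dedicated multi-AND node when no per-term extras are involved
//  - everything else -> a left-deep chain of binary operator nodes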
ExtNode_i * ExtNode_i::Create ( const XQNode_t * pNode, const ISphQwordSetup & tSetup, bool bUseBM25, const RowIdBoundaries_t * pBoundaries )
{
// empty node?
if ( pNode->IsEmpty() && pNode->GetOp()!=SPH_QUERY_SCAN )
return nullptr;
bool bRowidLimits = !!pBoundaries;
if ( pNode->GetOp()==SPH_QUERY_SCAN )
return CreateHitlessNode ( tSetup.ScanSpawn(), pNode->m_dSpec.m_dFieldMask, tSetup, true, bUseBM25, bRowidLimits );
if ( pNode->m_dWords.GetLength() || pNode->m_bVirtuallyPlain )
{
const int iWords = pNode->m_bVirtuallyPlain
? pNode->m_dChildren.GetLength()
: pNode->m_dWords.GetLength();
if ( iWords==1 )
{
if ( pNode->m_dWords.Begin()->m_bExpanded && pNode->m_dWords.Begin()->m_pPayload )
return CreatePayloadNode ( pNode, tSetup, bUseBM25, pBoundaries );
if ( pNode->m_bVirtuallyPlain )
return Create ( pNode->m_dChildren[0], tSetup, bUseBM25, pBoundaries );
else
return Create ( pNode->m_dWords[0], pNode, tSetup, bUseBM25, bRowidLimits );
}
switch ( pNode->GetOp() )
{
case SPH_QUERY_PHRASE:
return CreateMultiNode<ExtPhrase_c> ( pNode, tSetup, true, bUseBM25, pBoundaries );
case SPH_QUERY_PROXIMITY:
return CreateMultiNode<ExtProximity_c> ( pNode, tSetup, true, bUseBM25, pBoundaries );
case SPH_QUERY_NEAR:
return CreateMultiNode<ExtMultinear_c> ( pNode, tSetup, true, bUseBM25, pBoundaries );
case SPH_QUERY_QUORUM:
{
assert ( pNode->m_dWords.GetLength()==0 || pNode->m_dChildren.GetLength()==0 );
int iQuorumCount = pNode->m_dWords.GetLength()+pNode->m_dChildren.GetLength();
int iThr = ExtQuorum_c::GetThreshold ( *pNode, iQuorumCount );
bool bOrOperator = false;
if ( iThr>=iQuorumCount )
{
// threshold is too high
if ( tSetup.m_pWarning && !pNode->m_bPercentOp )
tSetup.m_pWarning->SetSprintf ( "quorum threshold too high (words=%d, thresh=%d); replacing quorum operator with AND operator",
iQuorumCount, pNode->m_iOpArg );
} else if ( iQuorumCount>256 )
{
// right now quorum can only handle 256 words
if ( tSetup.m_pWarning )
tSetup.m_pWarning->SetSprintf ( "too many words (%d) for quorum; replacing with an AND", iQuorumCount );
} else if ( iThr==1 )
{
bOrOperator = true;
} else // everything is ok; create quorum node
{
return CreateMultiNode<ExtQuorum_c> ( pNode, tSetup, false, bUseBM25, pBoundaries );
}
// couldn't create quorum, make an AND node instead
CSphVector<ExtNode_i*> dTerms;
dTerms.Reserve ( iQuorumCount );
for ( const XQKeyword_t& tWord: pNode->m_dWords )
dTerms.Add ( Create ( tWord, pNode, tSetup, bUseBM25, bRowidLimits ) );
for ( const XQNode_t* tNode: pNode->m_dChildren )
dTerms.Add ( Create ( tNode, tSetup, bUseBM25, pBoundaries ) );
// build an optimized (frequency-sorted), not just a simple, AND node
dTerms.Sort ( ExtNodeTF_fn() );
ExtNode_i * pCur = dTerms[0];
for ( int i=1; i<dTerms.GetLength(); i++ )
{
if ( !bOrOperator )
pCur = new ExtAnd_c ( pCur, dTerms[i] );
else
pCur = new ExtOr_c ( pCur, dTerms[i] );
}
if ( pNode->GetCount() )
return tSetup.m_pNodeCache->CreateProxy ( pCur, pNode, tSetup );
return pCur;
}
default:
assert ( 0 && "unexpected plain node type" );
return NULL;
}
} else
{
int iChildren = pNode->m_dChildren.GetLength ();
assert ( iChildren>0 );
// special case, operator BEFORE
if ( pNode->GetOp ()==SPH_QUERY_BEFORE )
{
// the BEFORE operator can not handle ZONESPAN
if ( tSetup.m_pWarning
&& pNode->m_dChildren.any_of ( [] ( XQNode_t * pChild ) { return pChild->m_dSpec.m_bZoneSpan; } ) )
tSetup.m_pWarning->SetSprintf ( "BEFORE operator is incompatible with ZONESPAN, ZONESPAN ignored" );
return CreateOrderNode ( pNode, tSetup, bUseBM25, pBoundaries );
}
// special case, AND over terms (internally reordered for speed)
bool bAndTerms = ( pNode->GetOp()==SPH_QUERY_AND );
for ( int i=0; i<iChildren && bAndTerms; i++ )
{
const XQNode_t * pChildren = pNode->m_dChildren[i];
bAndTerms = ( pChildren->m_dWords.GetLength()==1 );
}
bool bZonespan = bAndTerms;
for ( int i=0; i<iChildren && bZonespan; i++ )
bZonespan &= pNode->m_dChildren[i]->m_dSpec.m_bZoneSpan;
if ( bAndTerms )
{
// check if we can create multi-and node
bool bMultiAnd = !bZonespan && pNode->m_dChildren.GetLength()>1;
for ( int i=0; i<iChildren && bMultiAnd; i++ )
{
const XQNode_t * pChild = pNode->m_dChildren[i];
const XQKeyword_t & tWord = pChild->m_dWords[0];
if ( tWord.m_bFieldStart || tWord.m_bFieldEnd || tWord.m_pPayload || pChild->m_dSpec.m_iFieldMaxPos || pChild->m_dSpec.m_dZones.GetLength() )
{
bMultiAnd = false;
break;
}
}
ESphHitless eMode = tSetup.m_pIndex ? tSetup.m_pIndex->GetSettings().m_eHitless : SPH_HITLESS_NONE;
if ( eMode==SPH_HITLESS_SOME || eMode==SPH_HITLESS_ALL )
bMultiAnd = false;
if ( !bMultiAnd )
{
// create eval-tree terms from query-tree nodes
CSphVector<ExtNode_i*> dTerms;
for ( int i=0; i<iChildren; i++ )
{
const XQNode_t * pChild = pNode->m_dChildren[i];
ExtNode_i * pTerm = ExtNode_i::Create ( pChild, tSetup, bUseBM25, pBoundaries );
if ( pTerm )
dTerms.Add ( pTerm );
}
// sort them by frequency, to speed up AND matching
dTerms.Sort ( ExtNodeTF_fn() );
// create the right eval-tree node
ExtNode_i * pCur = dTerms[0];
for ( int i=1; i<dTerms.GetLength(); i++ )
if ( bZonespan )
pCur = new ExtAndZonespan_c ( pCur, dTerms[i], tSetup, pNode->m_dChildren[0] );
else
pCur = new ExtAnd_c ( pCur, dTerms[i] );
// zonespan has Extra data which is not (yet?) covered by common-node optimizations,
// so we need to avoid those for zonespan
if ( !bZonespan && pNode->GetCount() )
return tSetup.m_pNodeCache->CreateProxy ( pCur, pNode, tSetup );
return pCur;
}
else
return CreateMultiAndNode ( pNode->m_dChildren, tSetup, bUseBM25, bRowidLimits );
}
// NEAR and PHRASE can also be non-plain, so here is the second entry point for them.
if ( pNode->GetOp()==SPH_QUERY_NEAR )
return CreateMultiNode<ExtMultinear_c> ( pNode, tSetup, true, bUseBM25, pBoundaries );
if ( pNode->GetOp()==SPH_QUERY_PHRASE )
return CreateMultiNode<ExtPhrase_c> ( pNode, tSetup, true, bUseBM25, pBoundaries );
// generic create
ExtNode_i * pCur = NULL;
for ( int i=0; i<iChildren; i++ )
{
ExtNode_i * pNext = ExtNode_i::Create ( pNode->m_dChildren[i], tSetup, bUseBM25, pBoundaries );
if ( !pNext ) continue;
if ( !pCur )
{
pCur = pNext;
continue;
}
switch ( pNode->GetOp() )
{
case SPH_QUERY_OR: pCur = new ExtOr_c ( pCur, pNext ); break;
case SPH_QUERY_MAYBE: pCur = new ExtMaybe_c ( pCur, pNext ); break;
case SPH_QUERY_AND: pCur = new ExtAnd_c ( pCur, pNext ); break;
case SPH_QUERY_ANDNOT: pCur = new ExtAndNot_c ( pCur, pNext ); break;
case SPH_QUERY_SENTENCE:
case SPH_QUERY_PARAGRAPH:
{
const char * szUnit = pNode->GetOp()==SPH_QUERY_SENTENCE ? MAGIC_WORD_SENTENCE : MAGIC_WORD_PARAGRAPH;
if ( bRowidLimits )
pCur = new ExtUnit_T<true> ( pCur, pNext, pNode->m_dSpec.m_dFieldMask, tSetup, szUnit );
else
pCur = new ExtUnit_T<false> ( pCur, pNext, pNode->m_dSpec.m_dFieldMask, tSetup, szUnit );
}
break;
case SPH_QUERY_NOTNEAR: pCur = new ExtNotNear_c ( pCur, pNext, pNode->m_iOpArg ); break;
default: assert ( 0 && "internal error: unhandled op in ExtNode_i::Create()" ); break;
}
}
if ( pCur && pNode->GetCount() )
return tSetup.m_pNodeCache->CreateProxy ( pCur, pNode, tSetup );
return pCur;
}
}
//////////////////////////////////////////////////////////////////////////
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
inline void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::Init ( ISphQword * pQword, const FieldMask_t & tFields, const ISphQwordSetup & tSetup, bool bNotWeighted )
{
m_pQword = pQword;
m_pWarning = tSetup.m_pWarning;
m_bNotWeighted = bNotWeighted;
m_iAtomPos = pQword->m_iAtomPos;
m_dQueriedFields = tFields;
m_bHasWideFields = false;
if ( tSetup.m_bHasWideFields )
for ( int i=1; i<FieldMask_t::SIZE && !m_bHasWideFields; i++ )
if ( m_dQueriedFields[i] )
m_bHasWideFields = true;
SetMaxTimeout ( tSetup.m_iMaxTimer );
if constexpr(STATS)
{
m_pStats = tSetup.m_pStats;
m_pNanoBudget = m_pStats ? m_pStats->m_pNanoBudget : NULL;
}
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::ExtTerm_T ( ISphQword * pQword, const ISphQwordSetup & tSetup )
: m_pQword ( pQword )
, m_pWarning ( tSetup.m_pWarning )
{
m_iAtomPos = pQword->m_iAtomPos;
m_dQueriedFields.SetAll();
m_bHasWideFields = tSetup.m_bHasWideFields;
SetMaxTimeout( tSetup.m_iMaxTimer );
if constexpr ( STATS )
{
m_pStats = tSetup.m_pStats;
m_pNanoBudget = m_pStats ? m_pStats->m_pNanoBudget : nullptr;
}
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::Reset ( const ISphQwordSetup & tSetup )
{
SetMaxTimeout ( tSetup.m_iMaxTimer );
m_pQword->Reset ();
tSetup.QwordSetup ( m_pQword );
m_dStoredHits.Resize(0);
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
const ExtDoc_t * ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::GetDocsChunk()
{
if ( !m_pQword->m_iDocs )
return NULL;
// max_query_time
if ( TimeExceeded () )
{
if ( m_pWarning )
*m_pWarning = "query time exceeded max_query_time";
return NULL;
}
if constexpr ( STATS )
{
// max_predicted_time
if ( m_pNanoBudget && *m_pNanoBudget<0 )
{
if ( m_pWarning )
*m_pWarning = "predicted query time exceeded max_predicted_time";
return nullptr;
}
}
if ( sph::TimeExceeded ( m_iCheckTimePoint ) )
{
// interrupted by SIGTERM
if ( g_bInterruptNow )
{
if ( m_pWarning )
*m_pWarning = "Server shutdown in progress";
return nullptr;
}
if ( session::GetKilled() )
{
if ( m_pWarning )
*m_pWarning = "query was killed";
return nullptr;
}
Threads::Coro::RescheduleAndKeepCrashQuery();
}
StoredHit_t * pStoredHit = nullptr;
StoredHit_t * pFirstHit = nullptr;
if ( m_bCollectHits )
{
int iLength = m_dStoredHits.GetLength();
m_dStoredHits.Reserve ( iLength+MAX_BLOCK_DOCS );
pStoredHit = m_dStoredHits.End();
pFirstHit = pStoredHit-iLength; // == m_dStoredHits.Begin(); kept to compute the final length below
}
int iDoc = 0;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
const CSphMatch & tMatch = m_pQword->GetNextDoc();
if constexpr ( ROWID_LIMITS )
{
if ( tMatch.m_tRowID<m_tBoundaries.m_tMinRowID )
continue;
if ( tMatch.m_tRowID>m_tBoundaries.m_tMaxRowID )
{
m_pQword->m_iDocs = 0;
break;
}
}
if ( tMatch.m_tRowID==INVALID_ROWID )
{
m_pQword->m_iDocs = 0;
break;
}
if ( !m_bHasWideFields )
{
// fields 0-31 can be quickly checked right here, right now
if (!( m_pQword->m_dQwordFields.GetMask32() & m_dQueriedFields.GetMask32() ))
continue;
} else
{
// fields 32+ need to be checked with CollectHitMask() and stuff
m_pQword->CollectHitMask();
bool bHasSameFields = false;
for ( int i=0; i<FieldMask_t::SIZE && !bHasSameFields; i++ )
bHasSameFields = ( m_pQword->m_dQwordFields[i] & m_dQueriedFields[i] )!=0;
if ( !bHasSameFields )
continue;
}
ExtDoc_t & tDoc = m_dDocs[iDoc++];
tDoc.m_tRowID = tMatch.m_tRowID;
tDoc.m_uDocFields = m_pQword->m_dQwordFields.GetMask32() & m_dQueriedFields.GetMask32(); // OPTIMIZE: only needed for phrase node
if_const ( USE_BM25 )
tDoc.m_fTFIDF = float(m_pQword->m_uMatchHits) / float(m_pQword->m_uMatchHits+SPH_BM25_K1) * m_fIDF;
// store some hit info here (we can't reuse m_dDocs in CollectHits),
// but only if the ranker actually uses hits
if ( m_bCollectHits )
{
pStoredHit->m_tHitlistOffset = m_pQword->m_iHitlistPos;
pStoredHit->m_tRowID = tDoc.m_tRowID;
pStoredHit++;
}
}
if ( m_bCollectHits )
m_dStoredHits.Resize ( pStoredHit-pFirstHit );
if constexpr ( STATS )
{
assert(m_pStats);
m_pStats->m_iFetchedDocs += iDoc;
if ( m_pNanoBudget )
*m_pNanoBudget -= g_iPredictorCostDoc*iDoc;
}
return ReturnDocsChunk ( iDoc, "term", m_pQword->m_sDictWord.cstr() );
}
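// replay hits for the docs the ranker actually matched: GetDocsChunk() stored
// a (rowid, hitlist offset) pair per returned doc, so here we just seek the
// hitlist at the saved offset for every matched rowid and emit its hits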
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::CollectHits ( const ExtDoc_t * pMatched )
{
if ( !pMatched )
return;
m_dStoredHits.Add().m_tRowID = INVALID_ROWID;
StoredHit_t * pStoredHit = m_dStoredHits.Begin();
for ( ; HasDocs(pMatched); pMatched++ )
{
while ( pStoredHit->m_tRowID < pMatched->m_tRowID )
pStoredHit++;
if ( pStoredHit->m_tRowID!=pMatched->m_tRowID )
continue;
// setup hitlist reader
m_pQword->SeekHitlist ( pStoredHit->m_tHitlistOffset );
while (true)
{
// get next hit
Hitpos_t uHit = m_pQword->GetNextHit();
if ( uHit==EMPTY_HIT )
{
// no more hits; get next acceptable document
pStoredHit++;
break;
}
if ( !( m_dQueriedFields.Test ( HITMAN::GetField(uHit) ) ) )
continue;
ExtHit_t & tHit = m_dHits.Add();
tHit.m_tRowID = pStoredHit->m_tRowID;
tHit.m_uHitpos = uHit;
tHit.m_uQuerypos = (WORD) m_iAtomPos; // assume fewer than 64K words per query
tHit.m_uWeight = tHit.m_uMatchlen = tHit.m_uSpanlen = 1;
}
}
if constexpr ( STATS )
{
int nHits = m_dHits.GetLength();
assert(m_pStats);
m_pStats->m_iFetchedHits += nHits;
if ( m_pNanoBudget )
*m_pNanoBudget -= g_iPredictorCostHit*nHits;
}
// we assume that GetHits doesn't get called multiple times for the same docids in pMatched
// so let's drop the stored hits that we already used
// so that we won't need to loop through them the next time GetHits gets called for the same docs chunk
// however, remove only the hits that we've processed. others will be processed in the next GetDocsChunk() call
int nProcessed = int ( pStoredHit-m_dStoredHits.Begin() );
m_dStoredHits.Pop(); // end marker
m_dStoredHits.Remove ( 0, nProcessed );
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
int ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::GetQwords ( ExtQwordsHash_t & hQwords )
{
m_fIDF = 0.0f;
ExtQword_t * pQword = hQwords ( m_pQword->m_sWord );
if ( !m_bNotWeighted && pQword && !pQword->m_bExcluded )
pQword->m_iQueryPos = Min ( pQword->m_iQueryPos, m_pQword->m_iAtomPos );
if ( m_bNotWeighted || pQword )
return m_pQword->m_bExcluded ? -1 : m_pQword->m_iAtomPos;
m_fIDF = -1.0f;
ExtQword_t tInfo;
tInfo.m_sWord = m_pQword->m_sWord;
tInfo.m_sDictWord = m_pQword->m_sDictWord;
tInfo.m_iDocs = m_pQword->m_iDocs;
tInfo.m_iHits = m_pQword->m_iHits;
tInfo.m_iQueryPos = m_pQword->m_iAtomPos;
tInfo.m_fIDF = -1.0f; // suppress gcc 4.2.3 warning
tInfo.m_fBoost = m_pQword->m_fBoost;
tInfo.m_bExpanded = m_pQword->m_bExpanded;
tInfo.m_bExcluded = m_pQword->m_bExcluded;
hQwords.Add ( tInfo, m_pQword->m_sWord );
return m_pQword->m_bExcluded ? -1 : m_pQword->m_iAtomPos;
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
if ( m_fIDF<0.0f )
{
assert ( hQwords ( m_pQword->m_sWord ) );
m_fIDF = hQwords ( m_pQword->m_sWord )->m_fIDF;
}
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
if ( m_bNotWeighted || m_pQword->m_bExcluded )
return;
ExtQword_t & tQword = hQwords[ m_pQword->m_sWord ];
TermPos_t & tPos = dTermDupes.Add ();
tPos.m_uAtomPos = (WORD)m_pQword->m_iAtomPos;
tPos.m_uQueryPos = (WORD)tQword.m_iQueryPos;
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
uint64_t ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::GetWordID () const
{
if ( m_pQword->m_uWordID )
return m_pQword->m_uWordID;
return sphFNV64 ( m_pQword->m_sDictWord.cstr() );
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::HintRowID ( RowID_t tRowID )
{
m_pQword->HintRowID ( tRowID );
if constexpr ( STATS )
{
assert(m_pStats);
m_pStats->m_iSkips++;
if ( m_pNanoBudget )
*m_pNanoBudget -= g_iPredictorCostSkip;
}
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries )
{
m_tBoundaries = tBoundaries;
HintRowID ( tBoundaries.m_tMinRowID );
}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTerm_T<USE_BM25,ROWID_LIMITS,STATS>::DebugDump ( int iLevel )
{
DebugIndent ( iLevel );
printf ( "ExtTerm: %s at: %d ", m_pQword->m_sWord.cstr(), m_pQword->m_iAtomPos );
if ( m_dQueriedFields.TestAll(true) )
printf ( "(all)\n" );
else
{
bool bFirst = true;
printf ( "in: " );
for ( int iField=0; iField<SPH_MAX_FIELDS; iField++ )
{
if ( m_dQueriedFields.Test ( iField ) )
{
if ( !bFirst )
printf ( ", " );
printf ( "%d", iField );
bFirst = false;
}
}
printf ( "\n" );
}
}
//////////////////////////////////////////////////////////////////////////
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
ExtTermHitless_T<USE_BM25,ROWID_LIMITS,STATS>::ExtTermHitless_T ( ISphQword * pQword, const FieldMask_t & dFields, const ISphQwordSetup & tSetup, bool bNotWeighted )
: BASE ( pQword, dFields, tSetup, bNotWeighted )
{}
template<bool USE_BM25, bool ROWID_LIMITS, bool STATS>
void ExtTermHitless_T<USE_BM25,ROWID_LIMITS,STATS>::CollectHits ( const ExtDoc_t * pMatched )
{
if ( !pMatched )
return;
this->m_dStoredHits.Add().m_tRowID = INVALID_ROWID;
typename BASE::StoredHit_t * pStoredHit = this->m_dStoredHits.Begin();
for ( ; HasDocs(pMatched); pMatched++ )
{
while ( pStoredHit->m_tRowID < pMatched->m_tRowID )
pStoredHit++;
if ( pStoredHit->m_tRowID!=pMatched->m_tRowID )
continue;
DWORD uMaxFields = SPH_MAX_FIELDS;
if ( !this->m_bHasWideFields )
{
uMaxFields = 0;
DWORD uFields = pMatched->m_uDocFields;
while ( uFields ) // count up to highest bit, max value is 32
{
uFields >>= 1;
uMaxFields++;
}
}
for ( DWORD uFieldPos=0; uFieldPos<uMaxFields; uFieldPos++ )
if ( ( pMatched->m_uDocFields & ( 1 << uFieldPos ) ) && this->m_dQueriedFields.Test ( uFieldPos ) )
{
// emit hit
ExtHit_t & tHit = this->m_dHits.Add();
tHit.m_tRowID = pMatched->m_tRowID;
tHit.m_uHitpos = HITMAN::Create ( uFieldPos, -1 );
tHit.m_uQuerypos = (WORD) (this->m_iAtomPos);
tHit.m_uWeight = tHit.m_uMatchlen = tHit.m_uSpanlen = 1;
}
}
if constexpr ( STATS )
{
int nHits = this->m_dHits.GetLength();
assert ( this->m_pStats );
this->m_pStats->m_iFetchedHits += nHits;
if ( this->m_pNanoBudget )
*(this->m_pNanoBudget) -= g_iPredictorCostHit*nHits;
}
// same logic as in ExtTerm_T::CollectHits
int nProcessed = int ( pStoredHit-this->m_dStoredHits.Begin() );
this->m_dStoredHits.Pop(); // end marker
this->m_dStoredHits.Remove ( 0, nProcessed );
}
//////////////////////////////////////////////////////////////////////////
BufferedNode_c::BufferedNode_c()
{
Reset();
}
void BufferedNode_c::Reset()
{
m_pRawDocs = nullptr;
m_pRawDoc = nullptr;
m_pRawHit = nullptr;
m_dMyDocs[0].m_tRowID = INVALID_ROWID;
m_dMyHits.Resize(0);
}
void BufferedNode_c::CopyMatchingHits ( CSphVector<ExtHit_t> & dHits, const ExtDoc_t * pDocs )
{
m_dMyHits.Add().m_tRowID = INVALID_ROWID;
dHits.Resize(0);
const ExtHit_t * pMyHit = m_dMyHits.Begin();
while ( HasDocs(pDocs) )
{
while ( pMyHit->m_tRowID < pDocs->m_tRowID )
pMyHit++;
while ( pMyHit->m_tRowID==pDocs->m_tRowID )
dHits.Add ( *pMyHit++ );
pDocs++;
}
// remove only the hits that we've processed. others will be processed in the next GetDocsChunk() call
int nProcessed = int ( pMyHit-m_dMyHits.Begin() );
m_dMyHits.Pop(); // end marker
m_dMyHits.Remove ( 0, nProcessed );
}
//////////////////////////////////////////////////////////////////////////
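// ExtConditional_T wraps an inner node and keeps only the documents that have
// at least one hit accepted by the TermAcceptor_T<T> policy (field start/end,
// max field position, or zone membership); accepted hits are buffered in
// m_dMyHits and handed out later via CopyMatchingHits()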
template < TermPosFilter_e T, class NODE >
ExtConditional_T<T,NODE>::ExtConditional_T ( ISphQword * pQword, const XQNode_t * pNode, const ISphQwordSetup & tSetup )
: ExtNode_c { tSetup.m_iMaxTimer }
, BufferedNode_c ()
, Acceptor_c ( pQword, pNode, tSetup )
{
// we still need those hits even if the ranker hints that we can ignore them
m_tNode.SetCollectHits();
}
template < TermPosFilter_e T, class NODE >
void ExtConditional_T<T,NODE>::Reset ( const ISphQwordSetup & tSetup )
{
BufferedNode_c::Reset();
TermAcceptor_T<T>::Reset();
m_tNode.Reset(tSetup);
}
//////////////////////////////////////////////////////////////////////////
TermAcceptor_T<TERM_POS_FIELD_LIMIT>::TermAcceptor_T ( ISphQword *, const XQNode_t * pNode, const ISphQwordSetup & )
: m_iMaxFieldPos ( pNode->m_dSpec.m_iFieldMaxPos )
{}
inline bool TermAcceptor_T<TERM_POS_FIELD_LIMIT>::IsAcceptableHit ( const ExtHit_t * pHit ) const
{
return HITMAN::GetPos ( pHit->m_uHitpos )<=m_iMaxFieldPos;
}
template<>
inline bool TermAcceptor_T<TERM_POS_FIELD_START>::IsAcceptableHit ( const ExtHit_t * pHit ) const
{
return HITMAN::GetPos ( pHit->m_uHitpos )==1;
}
template<>
inline bool TermAcceptor_T<TERM_POS_FIELD_END>::IsAcceptableHit ( const ExtHit_t * pHit ) const
{
return HITMAN::IsEnd ( pHit->m_uHitpos );
}
template<>
inline bool TermAcceptor_T<TERM_POS_FIELD_STARTEND>::IsAcceptableHit ( const ExtHit_t * pHit ) const
{
return HITMAN::GetPos ( pHit->m_uHitpos )==1 && HITMAN::IsEnd ( pHit->m_uHitpos );
}
TermAcceptor_T<TERM_POS_ZONES>::TermAcceptor_T ( ISphQword *, const XQNode_t * pNode, const ISphQwordSetup & tSetup )
: m_pZoneChecker ( tSetup.m_pZoneChecker )
, m_dZones ( pNode->m_dSpec.m_dZones )
{}
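// zone check with a move-to-front trick: zones that reported
// SPH_ZONE_NO_DOCUMENT for the current rowid are swapped to the front and
// skipped via m_iCheckFrom until the rowid changes, so each missing zone is
// queried at most once per document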
inline bool TermAcceptor_T<TERM_POS_ZONES>::IsAcceptableHit ( const ExtHit_t * pHit ) const
{
assert ( m_pZoneChecker );
if ( m_tLastZoneRowID!=pHit->m_tRowID )
m_iCheckFrom = 0;
m_tLastZoneRowID = pHit->m_tRowID;
// only check zones that actually match this document
for ( int i=m_iCheckFrom; i<m_dZones.GetLength(); i++ )
{
SphZoneHit_e eState = m_pZoneChecker->IsInZone ( m_dZones[i], pHit, NULL );
switch ( eState )
{
case SPH_ZONE_FOUND:
return true;
case SPH_ZONE_NO_DOCUMENT:
Swap ( m_dZones[i], m_dZones[m_iCheckFrom] );
m_iCheckFrom++;
break;
default:
break;
}
}
return false;
}
inline void TermAcceptor_T<TERM_POS_ZONES>::Reset()
{
m_tLastZoneRowID = INVALID_ROWID;
m_iCheckFrom = 0;
}
//////////////////////////////////////////////////////////////////////////
template < TermPosFilter_e T, class NODE >
const ExtDoc_t * ExtConditional_T<T,NODE>::GetDocsChunk()
{
// fetch more docs if needed
if ( !HasDocs(m_pRawDocs) )
{
m_pRawDocs = m_tNode.GetDocsChunk();
if ( !HasDocs(m_pRawDocs) )
return nullptr;
m_pRawDoc = m_pRawDocs;
m_pRawHit = m_tNode.GetHits(m_pRawDoc);
}
// filter the hits, and build the documents list
int iMyDoc = 0;
const ExtDoc_t * pDoc = m_pRawDoc;
const ExtHit_t * pHit = m_pRawHit;
while (true)
{
if ( iMyDoc==MAX_BLOCK_DOCS-1 )
break;
// did we touch all the hits we had? if so, we're fully done with
// current raw docs block, and should start a new one
if ( !HasHits(pHit) )
{
m_pRawDocs = m_tNode.GetDocsChunk();
if ( !HasDocs(m_pRawDocs) )
break;
pDoc = m_pRawDocs;
pHit = m_tNode.GetHits(pDoc);
continue;
}
// scan until next acceptable hit
while ( pHit->m_tRowID < pDoc->m_tRowID ) // skip leftovers
pHit++;
while ( HasHits(pHit) && !Acceptor_c::IsAcceptableHit(pHit) ) // skip unneeded hits
pHit++;
if ( !HasHits(pHit) ) // check for eof
continue;
// find and emit new document
while ( pDoc->m_tRowID<pHit->m_tRowID )
pDoc++; // FIXME? unsafe in broken cases
assert ( pDoc->m_tRowID==pHit->m_tRowID );
assert ( iMyDoc<MAX_BLOCK_DOCS-1 );
m_dMyDocs[iMyDoc++] = *pDoc;
m_dMyHits.Add ( *(pHit++) );
// copy acceptable hits for this document
for ( ; pHit->m_tRowID==pDoc->m_tRowID; pHit++ )
{
if ( Acceptor_c::IsAcceptableHit ( pHit ) )
m_dMyHits.Add ( *pHit );
}
}
m_pRawDoc = pDoc;
m_pRawHit = pHit;
assert ( iMyDoc>=0 && iMyDoc<MAX_BLOCK_DOCS );
m_dMyDocs[iMyDoc].m_tRowID = INVALID_ROWID;
PrintDocsChunk ( iMyDoc, m_tNode.GetAtomPos(), m_dMyDocs, "cond", this );
return iMyDoc ? m_dMyDocs : nullptr;
}
template < TermPosFilter_e T, class NODE >
void ExtConditional_T<T, NODE>::CollectHits ( const ExtDoc_t * pDocs )
{
CopyMatchingHits ( m_dHits, pDocs );
PrintHitsChunk ( m_dHits.GetLength(), m_tNode.GetAtomPos(), m_dHits.Begin(), this );
}
template < TermPosFilter_e T, class NODE >
void ExtConditional_T<T, NODE>::HintRowID ( RowID_t tRowID )
{
m_tNode.HintRowID ( tRowID );
}
template < TermPosFilter_e T, class NODE >
int ExtConditional_T<T, NODE>::GetQwords ( ExtQwordsHash_t & hQwords )
{
return m_tNode.GetQwords ( hQwords );
}
template < TermPosFilter_e T, class NODE >
void ExtConditional_T<T, NODE>::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
return m_tNode.SetQwordsIDF ( hQwords );
}
template < TermPosFilter_e T, class NODE >
void ExtConditional_T<T, NODE>::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
return m_tNode.GetTerms ( hQwords, dTermDupes );
}
template < TermPosFilter_e T, class NODE >
uint64_t ExtConditional_T<T, NODE>::GetWordID() const
{
return m_tNode.GetWordID();
}
template < TermPosFilter_e T, class NODE >
void ExtConditional_T<T, NODE>::SetAtomPos ( int iPos )
{
m_tNode.SetAtomPos(iPos);
}
template < TermPosFilter_e T, class NODE >
int ExtConditional_T<T, NODE>::GetAtomPos() const
{
return m_tNode.GetAtomPos();
}
//////////////////////////////////////////////////////////////////////////
ExtTwofer_c::ExtTwofer_c ( ExtNode_i * pFirst, ExtNode_i * pSecond )
{
Init ( pFirst, pSecond );
}
inline void ExtTwofer_c::Init ( ExtNode_i * pLeft, ExtNode_i * pRight )
{
m_pLeft = std::unique_ptr<ExtNode_i>(pLeft);
m_pRight = std::unique_ptr<ExtNode_i>(pRight);
m_pDocL = nullptr;
m_pDocR = nullptr;
m_uNodePosL = 0;
m_uNodePosR = 0;
m_iAtomPos = ( pLeft && pLeft->GetAtomPos() ) ? pLeft->GetAtomPos() : 0;
if ( pRight && pRight->GetAtomPos() && pRight->GetAtomPos()<m_iAtomPos && m_iAtomPos!=0 )
m_iAtomPos = pRight->GetAtomPos();
int64_t tmTimeout = 0;
if ( pLeft )
tmTimeout = pLeft->GetMaxTimeout();
if ( !tmTimeout && pRight )
tmTimeout = pRight->GetMaxTimeout();
SetMaxTimeout ( tmTimeout );
}
void ExtTwofer_c::Reset ( const ISphQwordSetup & tSetup )
{
m_pLeft->Reset ( tSetup );
m_pRight->Reset ( tSetup );
m_pDocL = nullptr;
m_pDocR = nullptr;
}
int ExtTwofer_c::GetQwords ( ExtQwordsHash_t & hQwords )
{
int iMax1 = m_pLeft->GetQwords ( hQwords );
int iMax2 = m_pRight->GetQwords ( hQwords );
return Max ( iMax1, iMax2 );
}
void ExtTwofer_c::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
m_pLeft->SetQwordsIDF ( hQwords );
m_pRight->SetQwordsIDF ( hQwords );
}
void ExtTwofer_c::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
m_pLeft->GetTerms ( hQwords, dTermDupes );
m_pRight->GetTerms ( hQwords, dTermDupes );
}
bool ExtTwofer_c::GotHitless ()
{
return m_pLeft->GotHitless() || m_pRight->GotHitless();
}
void ExtTwofer_c::DebugDumpT ( const char * sName, int iLevel )
{
DebugIndent ( iLevel );
printf ( "%s:\n", sName );
m_pLeft->DebugDump ( iLevel+1 );
m_pRight->DebugDump ( iLevel+1 );
}
void ExtTwofer_c::SetNodePos ( WORD uPosLeft, WORD uPosRight )
{
m_uNodePosL = uPosLeft;
m_uNodePosR = uPosRight;
}
void ExtTwofer_c::HintRowID ( RowID_t tRowID )
{
m_pLeft->HintRowID ( tRowID );
m_pRight->HintRowID ( tRowID );
}
uint64_t ExtTwofer_c::GetWordID () const
{
uint64_t dHash[2];
dHash[0] = m_pLeft->GetWordID();
dHash[1] = m_pRight->GetWordID();
return sphFNV64 ( dHash, sizeof(dHash) );
}
void ExtTwofer_c::SetCollectHits()
{
if ( m_pLeft )
m_pLeft->SetCollectHits();
if ( m_pRight )
m_pRight->SetCollectHits();
}
NodeEstimate_t ExtTwofer_c::Estimate ( int64_t iTotalDocs ) const
{
NodeEstimate_t tLeft = { 0.0f, 0, 0 };
if ( m_pLeft )
tLeft = m_pLeft->Estimate(iTotalDocs);
NodeEstimate_t tRight = { 0.0f, 0, 0 };
if ( m_pRight )
tRight = m_pRight->Estimate(iTotalDocs);
tLeft += tRight;
return tLeft;
}
void ExtTwofer_c::SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries )
{
if ( m_pLeft ) m_pLeft->SetRowidBoundaries(tBoundaries);
if ( m_pRight ) m_pRight->SetRowidBoundaries(tBoundaries);
}
//////////////////////////////////////////////////////////////////////////
ExtAnd_c::ExtAnd_c ( ExtNode_i * pLeft, ExtNode_i * pRight )
: ExtTwofer_c ( pLeft, pRight )
{
m_bEmpty = ( !m_pLeft || !m_pLeft->GetDocsCount() ) || ( !m_pRight || !m_pRight->GetDocsCount() );
if ( m_pLeft && m_pLeft->GetDocsCount() && ( !m_pRight || !m_pRight->GetDocsCount() ) )
std::swap ( m_pLeft, m_pRight );
}
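// doclist intersection: both children return rowid-sorted chunks, so this is
// a plain two-way merge; equal rowids are emitted (fields OR-ed, TF-IDF
// summed) and the smaller rowid is skipped, while WarmupDocs() refills an
// exhausted side (and, presumably, hints the child with the other side's
// current rowid)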
const ExtDoc_t * ExtAnd_c::GetDocsChunk()
{
const ExtDoc_t * pDocL = m_pDocL;
const ExtDoc_t * pDocR = m_pDocR;
int iDoc = 0;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
if ( !WarmupDocs ( pDocL, pDocR, m_pLeft.get() ) )
break;
if ( !WarmupDocs ( pDocR, pDocL, m_pRight.get() ) )
break;
assert ( pDocL && pDocR );
if ( pDocL->m_tRowID==pDocR->m_tRowID )
{
// emit it
ExtDoc_t & tDoc = m_dDocs[iDoc++];
tDoc.m_tRowID = pDocL->m_tRowID;
tDoc.m_uDocFields = pDocL->m_uDocFields | pDocR->m_uDocFields; // not necessary
tDoc.m_fTFIDF = pDocL->m_fTFIDF + pDocR->m_fTFIDF;
// skip it
pDocL++;
pDocR++;
}
else if ( pDocL->m_tRowID<pDocR->m_tRowID )
pDocL++;
else
pDocR++;
}
m_pDocL = pDocL;
m_pDocR = pDocR;
return ReturnDocsChunk ( iDoc, "and" );
}
static inline bool IsHitLess ( const ExtHit_t * pHit1, const ExtHit_t * pHit2 )
{
assert ( pHit1 && pHit2 );
return ( pHit1->m_uHitpos<pHit2->m_uHitpos ) || ( pHit1->m_uHitpos==pHit2->m_uHitpos && pHit1->m_uQuerypos<=pHit2->m_uQuerypos );
}
struct CmpAndHitReverse_fn
{
inline bool IsLess ( const ExtHit_t & a, const ExtHit_t & b ) const
{
return ( a.m_tRowID<b.m_tRowID || ( a.m_tRowID==b.m_tRowID && a.m_uHitpos<b.m_uHitpos ) || ( a.m_tRowID==b.m_tRowID && a.m_uHitpos==b.m_uHitpos && a.m_uQuerypos>b.m_uQuerypos ) );
}
};
void ExtAnd_c::CollectHits ( const ExtDoc_t * pDocs )
{
if ( !pDocs )
return;
const ExtHit_t * pCurL = m_pLeft->GetHits(pDocs);
const ExtHit_t * pCurR = m_pRight->GetHits(pDocs);
const WORD uNodePosL = m_uNodePosL;
const WORD uNodePosR = m_uNodePosR;
RowID_t tMatchedRowID = INVALID_ROWID;
while ( HasHits(pCurL) && HasHits(pCurR) )
{
bool bLeft = false;
if ( pCurL->m_tRowID < pCurR->m_tRowID )
{
if ( pCurL->m_tRowID==tMatchedRowID )
m_dHits.Add ( *pCurL++ );
else
{
pCurL++;
continue;
}
bLeft = true;
}
else if ( pCurL->m_tRowID > pCurR->m_tRowID )
{
if ( pCurR->m_tRowID==tMatchedRowID )
m_dHits.Add ( *pCurR++ );
else
{
pCurR++;
continue;
}
}
else
{
tMatchedRowID = pCurL->m_tRowID;
if ( IsHitLess ( pCurL, pCurR ) )
{
m_dHits.Add ( *pCurL++ );
bLeft = true;
}
else
m_dHits.Add ( *pCurR++ );
}
if ( bLeft )
{
if ( uNodePosL!=0 )
m_dHits.Last().m_uNodepos = uNodePosL;
}
else
{
if ( uNodePosR!=0 )
m_dHits.Last().m_uNodepos = uNodePosR;
}
}
while ( HasHits(pCurL) && pCurL->m_tRowID==tMatchedRowID )
{
m_dHits.Add ( *pCurL++ );
if ( uNodePosL!=0 )
m_dHits.Last().m_uNodepos = uNodePosL;
}
while ( HasHits(pCurR) && pCurR->m_tRowID==tMatchedRowID )
{
m_dHits.Add ( *pCurR++ );
if ( uNodePosR!=0 )
m_dHits.Last().m_uNodepos = uNodePosR;
}
if ( m_bQPosReverse )
m_dHits.Sort ( CmpAndHitReverse_fn() );
}
NodeEstimate_t ExtAnd_c::Estimate ( int64_t iTotalDocs ) const
{
assert ( m_pLeft && m_pRight );
auto tLeftEstimate = m_pLeft->Estimate(iTotalDocs);
auto tRightEstimate = m_pRight->Estimate(iTotalDocs);
float fRatio = float(tLeftEstimate.m_fCost)/iTotalDocs*float(tRightEstimate.m_fCost)/iTotalDocs;
float fCost = CalcFTIntersectCost ( tLeftEstimate, tRightEstimate, iTotalDocs, MAX_BLOCK_DOCS, MAX_BLOCK_DOCS );
return { fCost, int64_t(fRatio*iTotalDocs), tLeftEstimate.m_iTerms+tRightEstimate.m_iTerms };
}
void ExtAnd_c::DebugDump ( int iLevel )
{
DebugDumpT ( "ExtAnd", iLevel );
}
//////////////////////////////////////////////////////////////////////////
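// ExtMultiAnd_T evaluates an AND of N plain keywords directly over their
// qwords, bypassing a tree of binary ExtAnd_c nodes; that is also why
// timeouts, budgets and kill switches are checked here rather than in ExtTerm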
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::NodeInfo_t::UpdateWideFieldFlag ( const ISphQwordSetup & tSetup )
{
m_bHasWideFields = false;
if ( tSetup.m_bHasWideFields )
for ( int i=1; i<FieldMask_t::SIZE && !m_bHasWideFields; i++ )
if ( m_dQueriedFields[i] )
m_bHasWideFields = true;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
bool ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::NodeInfo_t::FitsFields() const
{
if ( !m_bHasWideFields )
{
// fields 0-31 can be quickly checked right here, right now
if (!( m_pQword->m_dQwordFields.GetMask32() & m_dQueriedFields.GetMask32() ))
return false;
} else
{
// fields 32+ need to be checked with CollectHitMask() and stuff
m_pQword->CollectHitMask();
bool bHasSameFields = false;
for ( int i=0; i<FieldMask_t::SIZE && !bHasSameFields; i++ )
bHasSameFields = ( m_pQword->m_dQwordFields[i] & m_dQueriedFields[i] )!=0;
if ( !bHasSameFields )
return false;
}
return true;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::HitWithQpos_t::HitWithQpos_t ( int iNode, Hitpos_t uHit, WORD uQueryPos )
: m_iNode ( iNode )
, m_uHit ( uHit )
, m_uQueryPos ( uQueryPos )
{}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::ExtMultiAnd_T ( const VecTraits_T<XQNode_t*> & dXQNodes, const ISphQwordSetup & tSetup )
: ExtNode_c { tSetup.m_iMaxTimer }
, m_dWordIds ( dXQNodes.GetLength() )
, m_tQueue ( dXQNodes.GetLength() )
{
m_dNodes.Resize ( dXQNodes.GetLength() );
ARRAY_FOREACH ( i, m_dNodes )
{
NodeInfo_t & tNode = m_dNodes[i];
const XQNode_t & tXQNode = *dXQNodes[i];
tNode.m_pQword = CreateQueryWord ( tXQNode.m_dWords[0], tSetup );
assert ( tNode.m_pQword );
tNode.m_iAtomPos = tNode.m_pQword->m_iAtomPos;
tNode.m_uNodepos = (WORD)i;
tNode.m_bNotWeighted = tXQNode.m_bNotWeighted;
tNode.m_dQueriedFields = tXQNode.m_dSpec.m_dFieldMask;
tNode.UpdateWideFieldFlag(tSetup);
}
m_dNodes.Sort ( SelectivitySorter_t() );
m_iNodesSet = m_dNodes.GetLength();
m_pWarning = tSetup.m_pWarning;
m_pStats = tSetup.m_pStats;
m_pNanoBudget = m_pStats ? m_pStats->m_pNanoBudget : NULL;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::~ExtMultiAnd_T()
{
for ( auto & i : m_dNodes )
SafeDelete ( i.m_pQword );
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
DWORD ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetDocFieldsMask() const
{
DWORD uMask = 0;
for ( const auto & i : m_dNodes )
uMask |= i.m_pQword->m_dQwordFields.GetMask32() & i.m_dQueriedFields.GetMask32();
return uMask;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
float ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetTFIDF() const
{
float fTFIDF = 0.0f;
if constexpr ( USE_BM25 )
{
for ( const auto & i : m_dNodes )
fTFIDF += float(i.m_pQword->m_uMatchHits) / float(i.m_pQword->m_uMatchHits+SPH_BM25_K1) * i.m_fIDF;
}
return fTFIDF;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
RowID_t ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::Advance ( int iNode )
{
NodeInfo_t & tNode = m_dNodes[iNode];
do
{
tNode.m_tRowID = tNode.m_pQword->GetNextDoc().m_tRowID;
}
while ( tNode.m_tRowID!=INVALID_ROWID && !tNode.FitsFields() );
return tNode.m_tRowID;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
RowID_t ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::Advance ( int iNode, RowID_t tRowID )
{
NodeInfo_t & tNode = m_dNodes[iNode];
if ( tRowID==tNode.m_tRowID )
return tRowID;
tNode.m_tRowID = tNode.m_pQword->AdvanceTo ( tRowID );
while ( tNode.m_tRowID!=INVALID_ROWID )
{
if constexpr ( ROWID_LIMITS )
{
// don't check left boundary as we already enforced it when we advanced node #0
if ( tNode.m_tRowID > m_tBoundaries.m_tMaxRowID )
{
tNode.m_tRowID = INVALID_ROWID;
break;
}
}
if ( tNode.FitsFields() )
break;
tNode.m_tRowID = tNode.m_pQword->GetNextDoc().m_tRowID;
}
return tNode.m_tRowID;
}
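// leapfrog-style intersection: node 0 (assumed the most selective one after
// sorting) drives; every other node is advanced to its rowid, and any
// overshoot restarts the round from node 0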
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
bool ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::AdvanceQwords()
{
RowID_t tMaxRowID = m_dNodes[0].m_tRowID;
for ( int i=1; i < m_dNodes.GetLength(); i++ )
{
NodeInfo_t & tCurNode = m_dNodes[i];
if ( tCurNode.m_tRowID==tMaxRowID )
continue;
Advance ( i, tMaxRowID );
if ( tCurNode.m_tRowID==INVALID_ROWID )
return false;
else if ( tCurNode.m_tRowID>tMaxRowID )
{
if ( Advance ( 0, tCurNode.m_tRowID )==INVALID_ROWID )
return false;
tMaxRowID = m_dNodes[0].m_tRowID;
i = 0;
}
}
return true;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
const ExtDoc_t * ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetDocsChunk()
{
// since we're working directly with qwords, we need to check all those things here and not in ExtTerm
// max_query_time
if ( TimeExceeded () )
{
if ( m_pWarning )
*m_pWarning = "query time exceeded max_query_time";
return NULL;
}
// max_predicted_time
if ( m_pNanoBudget && *m_pNanoBudget<0 )
{
if ( m_pWarning )
*m_pWarning = "predicted query time exceeded max_predicted_time";
return nullptr;
}
if ( sph::TimeExceeded ( m_iCheckTimePoint ) )
{
// interrupted by SIGTERM
if ( g_bInterruptNow )
{
if ( m_pWarning )
*m_pWarning = "Server shutdown in progress";
return nullptr;
}
if ( session::GetKilled() )
{
if ( m_pWarning )
*m_pWarning = "query was killed";
return nullptr;
}
Threads::Coro::RescheduleAndKeepCrashQuery();
}
if ( m_bFirstChunk )
{
if ( m_iNodesSet!=m_dNodes.GetLength() || !m_dNodes[0].m_pQword->m_iDocs )
return nullptr;
if constexpr ( ROWID_LIMITS )
Advance ( 0, m_tBoundaries.m_tMinRowID );
else
Advance(0);
m_bFirstChunk = false;
}
StoredMultiHit_t * pStoredHit = nullptr;
StoredMultiHit_t * pFirstHit = nullptr;
if ( m_bCollectHits )
{
int iLength = m_dStoredHits.GetLength();
m_dStoredHits.Reserve ( iLength+MAX_BLOCK_DOCS );
pStoredHit = m_dStoredHits.End();
pFirstHit = pStoredHit-iLength; // hack to get to m_pData
}
int iDoc = 0;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
if ( m_dNodes[0].m_tRowID==INVALID_ROWID )
break;
if ( !AdvanceQwords() )
{
m_dNodes[0].m_tRowID=INVALID_ROWID;
break;
}
RowID_t tMatchedRowID = m_dNodes[0].m_tRowID;
ExtDoc_t & tDoc = m_dDocs[iDoc++];
tDoc.m_tRowID = tMatchedRowID;
tDoc.m_uDocFields = GetDocFieldsMask();
tDoc.m_fTFIDF = GetTFIDF();
if ( m_bCollectHits )
{
pStoredHit->m_tRowID = tMatchedRowID;
pStoredHit->m_dHitlistOffsets.Reset(m_dNodes.GetLength());
ARRAY_FOREACH ( i, m_dNodes )
pStoredHit->m_dHitlistOffsets[i] = m_dNodes[i].m_pQword->m_iHitlistPos;
pStoredHit++;
}
// we assume that the 1st node returns the fewest docs
Advance(0);
}
if ( m_bCollectHits )
m_dStoredHits.Resize ( pStoredHit-pFirstHit );
if (m_pStats)
m_pStats->m_iFetchedDocs += iDoc;
if ( m_pNanoBudget )
*m_pNanoBudget -= g_iPredictorCostDoc*iDoc;
return ReturnDocsChunk ( iDoc, "multiand" );
}
static inline bool IsHitLess ( Hitpos_t uHitposL, WORD uQueryPosL, Hitpos_t uHitposR, WORD uQueryPosR )
{
return uHitposL<uHitposR || ( uHitposL==uHitposR && uQueryPosL<=uQueryPosR );
}
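// note that operator< compares in reverse order, so a queue that pops its
// greatest element by operator< yields the smallest (hitpos, querypos) first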
struct HitWithQpos_t
{
int m_iNode;
Hitpos_t m_uHit;
WORD m_uQueryPos;
HitWithQpos_t ( int iNode, Hitpos_t uHit, WORD uQueryPos )
: m_iNode ( iNode )
, m_uHit ( uHit )
, m_uQueryPos ( uQueryPos )
{}
bool operator < ( const HitWithQpos_t & rhs ) const
{
return IsHitLess ( rhs.m_uHit, rhs.m_uQueryPos, m_uHit, m_uQueryPos );
}
};
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::InitHitMerge ( HitInfo_t & tHitInfo, int iNode, const StoredMultiHit_t & tStoredHit )
{
const NodeInfo_t & tNode = m_dNodes[iNode];
tHitInfo.m_pQword = tNode.m_pQword;
assert ( tHitInfo.m_pQword );
tHitInfo.m_uNodePos = tNode.m_uNodepos;
tHitInfo.m_uQueryPos = (WORD)tNode.m_iAtomPos;
tHitInfo.m_pQword->SeekHitlist ( tStoredHit.m_dHitlistOffsets[iNode] );
tHitInfo.m_uHitpos = tHitInfo.m_pQword->GetNextHit();
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::AddHit ( RowID_t tRowID, HitInfo_t & tHit, int iNode )
{
if constexpr(TEST_FIELDS)
{
if ( m_dNodes[iNode].m_dQueriedFields.Test ( HITMAN::GetField ( tHit.m_uHitpos ) ) )
m_dHits.Add ( ExtHit_t { tRowID, tHit.m_uHitpos, tHit.m_uQueryPos, tHit.m_uNodePos, 1, 1, 1, 0 } );
}
else
m_dHits.Add ( ExtHit_t { tRowID, tHit.m_uHitpos, tHit.m_uQueryPos, tHit.m_uNodePos, 1, 1, 1, 0 } );
tHit.m_uHitpos = tHit.m_pQword->GetNextHit();
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::DoHitMerge ( RowID_t tRowID, HitInfo_t & tLeft, HitInfo_t & tRight )
{
while ( tLeft.m_uHitpos!=EMPTY_HIT && tRight.m_uHitpos!=EMPTY_HIT )
{
if ( IsHitLess ( tLeft, tRight ) )
AddHit ( tRowID, tLeft, 0 );
else
AddHit ( tRowID, tRight, 1 );
}
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::DoHitMerge ( RowID_t tRowID, HitInfo_t & tHit1, HitInfo_t & tHit2, HitInfo_t & tHit3 )
{
while ( tHit1.m_uHitpos!=EMPTY_HIT && tHit2.m_uHitpos!=EMPTY_HIT && tHit3.m_uHitpos!=EMPTY_HIT )
{
if ( IsHitLess ( tHit1, tHit2 ) && IsHitLess ( tHit1, tHit3 ) )
AddHit ( tRowID, tHit1, 0 );
else if ( IsHitLess ( tHit2, tHit1 ) && IsHitLess ( tHit2, tHit3 ) )
AddHit ( tRowID, tHit2, 1 );
else
AddHit ( tRowID, tHit3, 2 );
}
if ( tHit1.m_uHitpos==EMPTY_HIT )
DoHitMerge ( tRowID, tHit2, tHit3 );
else if ( tHit2.m_uHitpos==EMPTY_HIT )
DoHitMerge ( tRowID, tHit1, tHit3 );
else
DoHitMerge ( tRowID, tHit1, tHit2 );
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::CopyHits ( RowID_t tRowID, HitInfo_t & tHitInfo, int iNode )
{
while ( tHitInfo.m_uHitpos!=EMPTY_HIT )
{
if constexpr (TEST_FIELDS)
{
if ( m_dNodes[iNode].m_dQueriedFields.Test ( HITMAN::GetField ( tHitInfo.m_uHitpos ) ) )
m_dHits.Add ( ExtHit_t { tRowID, tHitInfo.m_uHitpos, tHitInfo.m_uQueryPos, tHitInfo.m_uNodePos, 1, 1, 1, 0 } );
}
else
m_dHits.Add ( ExtHit_t { tRowID, tHitInfo.m_uHitpos, tHitInfo.m_uQueryPos, tHitInfo.m_uNodePos, 1, 1, 1, 0 } );
tHitInfo.m_uHitpos = tHitInfo.m_pQword->GetNextHit();
}
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::MergeHits2 ( const StoredMultiHit_t & tStoredHit )
{
const int NUM_STREAMS = 2;
HitInfo_t dHits[NUM_STREAMS];
RowID_t tRowID = tStoredHit.m_tRowID;
for ( int i = 0; i < NUM_STREAMS; i++ )
InitHitMerge ( dHits[i], i, tStoredHit );
DoHitMerge ( tRowID, dHits[0], dHits[1] );
for ( int i = 0; i < NUM_STREAMS; i++ )
CopyHits ( tRowID, dHits[i], i );
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::MergeHits3 ( const StoredMultiHit_t & tStoredHit )
{
const int NUM_STREAMS = 3;
HitInfo_t dHits[NUM_STREAMS];
RowID_t tRowID = tStoredHit.m_tRowID;
for ( int i = 0; i < NUM_STREAMS; i++ )
InitHitMerge ( dHits[i], i, tStoredHit );
DoHitMerge ( tRowID, dHits[0], dHits[1], dHits[2] );
for ( int i = 0; i < NUM_STREAMS; i++ )
CopyHits ( tRowID, dHits[i], i );
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::PushNextHit ( int iNode )
{
NodeInfo_t & tNode = m_dNodes[iNode];
while ( !tNode.m_bHitsOver )
{
Hitpos_t uHit = tNode.m_pQword->GetNextHit();
if ( uHit==EMPTY_HIT )
tNode.m_bHitsOver = true;
else if ( tNode.m_dQueriedFields.Test ( HITMAN::GetField(uHit) ) )
{
m_tQueue.Push ( HitWithQpos_t ( iNode, uHit, (WORD)tNode.m_iAtomPos ) );
break;
}
}
}
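// generic N-way hitlist merge: seed the priority queue with one hit per
// node, then repeatedly pop the globally smallest hit and refill the queue
// from the node it came from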
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::MergeHitsN ( const StoredMultiHit_t & tStoredHit )
{
// setup hitlist reader
ARRAY_FOREACH ( i, m_dNodes )
{
m_dNodes[i].m_pQword->SeekHitlist ( tStoredHit.m_dHitlistOffsets[i] );
m_dNodes[i].m_bHitsOver = false;
}
// merge hitlists from all nodes for a given rowid
assert ( !m_tQueue.GetLength() );
ARRAY_FOREACH ( i, m_dNodes )
PushNextHit(i);
while ( m_tQueue.GetLength() )
{
const HitWithQpos_t & tHitWithQpos = m_tQueue.Root();
int iNode = tHitWithQpos.m_iNode;
NodeInfo_t & tNode = m_dNodes[iNode];
ExtHit_t & tHit = m_dHits.Add();
tHit.m_tRowID = tStoredHit.m_tRowID;
tHit.m_uHitpos = tHitWithQpos.m_uHit;
tHit.m_uQuerypos = tHitWithQpos.m_uQueryPos; // assume less than 64K words per query
tHit.m_uWeight = tHit.m_uMatchlen = tHit.m_uSpanlen = 1;
tHit.m_uNodepos = tNode.m_uNodepos;
m_tQueue.Pop();
PushNextHit(iNode);
}
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::CollectHits ( const ExtDoc_t * pMatched )
{
if ( !pMatched )
return;
m_dStoredHits.Add().m_tRowID = INVALID_ROWID;
StoredMultiHit_t * pStoredHit = m_dStoredHits.Begin();
for ( ; HasDocs(pMatched); pMatched++ )
{
while ( pStoredHit->m_tRowID < pMatched->m_tRowID )
pStoredHit++;
if ( pStoredHit->m_tRowID!=pMatched->m_tRowID )
continue;
switch ( m_dNodes.GetLength() )
{
case 2: MergeHits2 ( *pStoredHit ); break;
case 3: MergeHits3 ( *pStoredHit ); break;
default: MergeHitsN ( *pStoredHit ); break;
}
}
int nHits = m_dHits.GetLength();
if ( m_pStats )
m_pStats->m_iFetchedHits += nHits;
if ( m_pNanoBudget )
*m_pNanoBudget -= g_iPredictorCostHit*nHits;
// look at ExtTerm_T for more info on this code
int nProcessed = int ( pStoredHit-m_dStoredHits.Begin() );
m_dStoredHits.Pop(); // end marker
m_dStoredHits.Remove ( 0, nProcessed );
if ( m_bQPosReverse )
m_dHits.Sort ( CmpAndHitReverse_fn() );
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::Reset ( const ISphQwordSetup & tSetup )
{
m_bFirstChunk = true;
m_iNodesSet = 0;
SetMaxTimeout ( tSetup.m_iMaxTimer );
for ( auto & i : m_dNodes )
{
i.m_tRowID = INVALID_ROWID;
i.m_bHitsOver = false;
i.m_pQword->Reset ();
// need to track active nodes for every segment;
// however, AND requires all of its nodes, which is why fast reject can be used
if ( tSetup.QwordSetup ( i.m_pQword ) )
m_iNodesSet++;
}
m_dStoredHits.Resize(0);
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
int ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetQword ( NodeInfo_t & tNode, ExtQwordsHash_t & hQwords )
{
tNode.m_fIDF = 0.0f;
ExtQword_t * pQword = hQwords ( tNode.m_pQword->m_sWord );
if ( !tNode.m_bNotWeighted && pQword && !pQword->m_bExcluded )
pQword->m_iQueryPos = Min ( pQword->m_iQueryPos, tNode.m_pQword->m_iAtomPos );
if ( tNode.m_bNotWeighted || pQword )
return tNode.m_pQword->m_bExcluded ? -1 : tNode.m_pQword->m_iAtomPos;
tNode.m_fIDF = -1.0f;
ExtQword_t tInfo;
tInfo.m_sWord = tNode.m_pQword->m_sWord;
tInfo.m_sDictWord = tNode.m_pQword->m_sDictWord;
tInfo.m_iDocs = tNode.m_pQword->m_iDocs;
tInfo.m_iHits = tNode.m_pQword->m_iHits;
tInfo.m_iQueryPos = tNode.m_pQword->m_iAtomPos;
tInfo.m_fIDF = -1.0f; // suppress gcc 4.2.3 warning
tInfo.m_fBoost = tNode.m_pQword->m_fBoost;
tInfo.m_bExpanded = tNode.m_pQword->m_bExpanded;
tInfo.m_bExcluded = tNode.m_pQword->m_bExcluded;
hQwords.Add ( tInfo, tNode.m_pQword->m_sWord );
return tNode.m_pQword->m_bExcluded ? -1 : tNode.m_pQword->m_iAtomPos;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
int ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetQwords ( ExtQwordsHash_t & hQwords )
{
int iMax = -1;
for ( auto & i : m_dNodes )
{
int iRes = GetQword ( i, hQwords );
iMax = Max ( iRes, iMax );
}
return iMax;
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
for ( auto & i : m_dNodes )
if ( i.m_fIDF<0.0f )
{
assert ( hQwords ( i.m_pQword->m_sWord ) );
i.m_fIDF = hQwords ( i.m_pQword->m_sWord )->m_fIDF;
}
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
for ( const auto & i : m_dNodes )
if ( i.m_bNotWeighted || !i.m_pQword->m_bExcluded )
{
ExtQword_t & tQword = hQwords[i.m_pQword->m_sWord];
TermPos_t & tPos = dTermDupes.Add();
tPos.m_uAtomPos = (WORD)i.m_pQword->m_iAtomPos;
tPos.m_uQueryPos = (WORD)tQword.m_iQueryPos;
}
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
uint64_t ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetWordID() const
{
ARRAY_FOREACH ( i, m_dNodes )
{
NodeInfo_t & tNode = m_dNodes[i];
if ( tNode.m_pQword->m_uWordID )
m_dWordIds[i] = tNode.m_pQword->m_uWordID;
else
m_dWordIds[i] = sphFNV64 ( tNode.m_pQword->m_sDictWord.cstr() );
}
return sphFNV64 ( m_dWordIds.Begin(), (int) m_dWordIds.GetLengthBytes() );
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
int ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::GetDocsCount() const
{
if ( !m_dNodes.GetLength() || !m_dNodes[0].m_pQword->m_iDocs )
return 0;
return ExtNode_c::GetDocsCount();
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::HintRowID ( RowID_t tRowID )
{
if ( !m_dNodes[0].m_pQword->m_iDocs )
return;
if constexpr ( ROWID_LIMITS )
tRowID = Max ( tRowID, m_tBoundaries.m_tMinRowID );
if ( m_bFirstChunk || ( m_dNodes[0].m_tRowID!=INVALID_ROWID && tRowID>m_dNodes[0].m_tRowID ) )
{
if ( m_bFirstChunk && m_iNodesSet!=m_dNodes.GetLength() )
return;
Advance ( 0, tRowID );
m_bFirstChunk = false;
}
}
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
void ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::DebugDump ( int iLevel )
{
DebugIndent ( iLevel );
printf ( "ExtMultiAnd\n" );
DebugIndent ( iLevel+1 );
for ( const auto & i : m_dNodes )
{
printf ( "%s at: %d ", i.m_pQword->m_sWord.cstr(), i.m_pQword->m_iAtomPos );
if ( i.m_dQueriedFields.TestAll(true) )
printf ( "(all)\n" );
else
{
bool bFirst = true;
printf ( "in: " );
for ( int iField=0; iField<SPH_MAX_FIELDS; iField++ )
{
if ( i.m_dQueriedFields.Test ( iField ) )
{
if ( !bFirst )
printf ( ", " );
printf ( "%d", iField );
bFirst = false;
}
}
printf ( "\n" );
}
}
}
static float CalcQwordReadCost ( ISphQword * pQword )
{
assert(pQword);
return float(pQword->m_iDocs)*COST_SCALE*55.0f;
}
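// fold per-term read costs pairwise, left to right, as if the nodes formed
// a left-deep binary AND tree; fRatio tracks the estimated intersection
// selectivity, assuming the terms occur independently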
template <bool USE_BM25,bool TEST_FIELDS,bool ROWID_LIMITS>
NodeEstimate_t ExtMultiAnd_T<USE_BM25,TEST_FIELDS,ROWID_LIMITS>::Estimate ( int64_t iTotalDocs ) const
{
float fRatio = 1.0f;
float fCostLeft = 0.0f;
ARRAY_FOREACH ( i, m_dNodes )
{
const auto & tNode = m_dNodes[i];
assert(tNode.m_pQword);
if (!i)
fCostLeft = CalcQwordReadCost ( tNode.m_pQword );
else
{
float fCostRight = CalcQwordReadCost ( tNode.m_pQword );
NodeEstimate_t tEst1 = { fCostLeft, int64_t(fRatio*iTotalDocs), i };
NodeEstimate_t tEst2 = { fCostRight, tNode.m_pQword->m_iDocs, 1 };
fCostLeft = CalcFTIntersectCost ( tEst1, tEst2, iTotalDocs, MAX_BLOCK_DOCS, MAX_BLOCK_DOCS );
}
fRatio *= float(tNode.m_pQword->m_iDocs) / iTotalDocs;
}
return { fCostLeft, int64_t(fRatio*iTotalDocs), m_dNodes.GetLength() };
}
//////////////////////////////////////////////////////////////////////////
bool ExtAndZonespanned_c::IsSameZonespan ( const ExtHit_t * pHit1, const ExtHit_t * pHit2 ) const
{
for ( auto iZone : m_dZones )
{
int iSpan1, iSpan2;
if ( m_pZoneChecker->IsInZone ( iZone, pHit1, &iSpan1 )==SPH_ZONE_FOUND && m_pZoneChecker->IsInZone ( iZone, pHit2, &iSpan2 )==SPH_ZONE_FOUND )
{
assert ( iSpan1>=0 && iSpan2>=0 );
if ( iSpan1==iSpan2 )
return true;
}
}
return false;
}
void ExtAndZonespanned_c::CollectHits ( const ExtDoc_t * pDocs )
{
if ( !pDocs )
return;
const ExtHit_t * pCurL = m_pLeft->GetHits(pDocs);
const ExtHit_t * pCurR = m_pRight->GetHits(pDocs);
const WORD uNodePosL = m_uNodePosL;
const WORD uNodePosR = m_uNodePosR;
while ( HasHits(pCurL) && HasHits(pCurR) )
{
if ( pCurL->m_tRowID < pCurR->m_tRowID )
pCurL++;
else if ( pCurL->m_tRowID > pCurR->m_tRowID )
pCurR++;
else
{
if ( IsHitLess ( pCurL, pCurR ) )
{
if ( IsSameZonespan ( pCurL, pCurR ) )
{
m_dHits.Add ( *pCurL );
if ( uNodePosL!=0 )
m_dHits.Last().m_uNodepos = uNodePosL;
}
pCurL++;
}
else
{
if ( IsSameZonespan ( pCurL, pCurR ) )
{
m_dHits.Add ( *pCurR );
if ( uNodePosR!=0 )
m_dHits.Last().m_uNodepos = uNodePosR;
}
pCurR++;
}
}
}
if ( m_bQPosReverse )
m_dHits.Sort ( CmpAndHitReverse_fn() );
}
void ExtAndZonespanned_c::DebugDump ( int iLevel )
{
DebugDumpT ( "ExtAndZonespan", iLevel );
}
//////////////////////////////////////////////////////////////////////////
ExtOr_c::ExtOr_c ( ExtNode_i * pLeft, ExtNode_i * pRight )
: ExtTwofer_c ( pLeft, pRight )
{}
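// union merge of two rowid-sorted streams; when both sides share a rowid,
// their field masks and TF-IDF values are combined into a single output doc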
const ExtDoc_t * ExtOr_c::GetDocsChunk()
{
int iDoc = 0;
const ExtDoc_t * pDocL = m_pDocL;
const ExtDoc_t * pDocR = m_pDocR;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
if ( !HasDocs(pDocL) )
{
pDocL = m_pLeft->GetDocsChunk();
if ( !pDocL && TimeExceeded() )
break;
}
if ( !HasDocs(pDocR) )
{
pDocR = m_pRight->GetDocsChunk();
if ( !pDocR && TimeExceeded() )
break;
}
if ( !HasDocs(pDocL) && !HasDocs(pDocR) )
break;
ExtDoc_t & tNewDoc = m_dDocs[iDoc];
// merge lists while we can; copy the tail when we cannot
if ( HasDocs(pDocL) && HasDocs(pDocR) )
{
if ( pDocL->m_tRowID==pDocR->m_tRowID )
{
tNewDoc = *pDocL;
tNewDoc.m_uDocFields = pDocL->m_uDocFields | pDocR->m_uDocFields; // not necessary
tNewDoc.m_fTFIDF = pDocL->m_fTFIDF + pDocR->m_fTFIDF;
pDocL++;
pDocR++;
}
else if ( pDocL->m_tRowID<pDocR->m_tRowID )
tNewDoc = *pDocL++;
else
tNewDoc = *pDocR++;
}
else if ( HasDocs(pDocL) )
tNewDoc = *pDocL++;
else
tNewDoc = *pDocR++;
iDoc++;
}
m_pDocL = pDocL;
m_pDocR = pDocR;
return ReturnDocsChunk ( iDoc, "or" );
}
void ExtOr_c::CollectHits ( const ExtDoc_t * pDocs )
{
if ( !pDocs )
return;
const ExtHit_t * pCurL = m_pLeft->GetHits(pDocs);
const ExtHit_t * pCurR = m_pRight->GetHits(pDocs);
// merge, while possible
while ( HasHits(pCurL) && HasHits(pCurR) )
{
if ( pCurL->m_tRowID < pCurR->m_tRowID )
m_dHits.Add ( *pCurL++ );
else if ( pCurL->m_tRowID > pCurR->m_tRowID )
m_dHits.Add ( *pCurR++ );
else
{
if ( IsHitLess ( pCurL, pCurR ) )
m_dHits.Add ( *pCurL++ );
else
m_dHits.Add ( *pCurR++ );
}
}
while ( HasHits(pCurL) )
m_dHits.Add ( *pCurL++ );
while ( HasHits(pCurR) )
m_dHits.Add ( *pCurR++ );
}
void ExtOr_c::DebugDump ( int iLevel )
{
DebugDumpT ( "ExtOr", iLevel );
}
NodeEstimate_t ExtOr_c::Estimate ( int64_t iTotalDocs ) const
{
assert ( m_pLeft && m_pRight );
auto tLeftEstimate = m_pLeft->Estimate(iTotalDocs);
auto tRightEstimate = m_pRight->Estimate(iTotalDocs);
float fIntersection = float(tLeftEstimate.m_iDocs)/iTotalDocs*float(tRightEstimate.m_iDocs)/iTotalDocs;
int64_t iIntersectionDocs = int64_t(fIntersection*iTotalDocs);
int64_t iResDocs = tLeftEstimate.m_iDocs+tRightEstimate.m_iDocs>=iIntersectionDocs ? tLeftEstimate.m_iDocs+tRightEstimate.m_iDocs-iIntersectionDocs : iIntersectionDocs;
float fMergeCost = float(tLeftEstimate.m_iDocs + tRightEstimate.m_iDocs)*COST_SCALE*10.0f;
return { tLeftEstimate.m_fCost + tRightEstimate.m_fCost + fMergeCost, iResDocs, tLeftEstimate.m_iTerms + tRightEstimate.m_iTerms };
}
//////////////////////////////////////////////////////////////////////////
ExtMaybe_c::ExtMaybe_c ( ExtNode_i * pLeft, ExtNode_i * pRight )
: ExtOr_c ( pLeft, pRight )
{}
// returns documents from the left subtree only
//
// the right subtree is rewound alongside to the same rowids as the left one,
// so that hits from the right subtree are also returned where they exist
//
// we do this because those right-subtree hits need to affect the match rank
const ExtDoc_t * ExtMaybe_c::GetDocsChunk()
{
const ExtDoc_t * pDocL = m_pDocL;
const ExtDoc_t * pDocR = m_pDocR;
int iDoc = 0;
bool bRightEmpty = false;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
if ( !WarmupDocs ( pDocL, m_pLeft.get() ) )
break;
if ( !bRightEmpty )
bRightEmpty = !WarmupDocs ( pDocR, m_pRight.get() );
if ( !bRightEmpty )
{
if ( pDocL->m_tRowID==pDocR->m_tRowID )
{
m_dDocs[iDoc] = *pDocL;
m_dDocs[iDoc].m_uDocFields = pDocL->m_uDocFields | pDocR->m_uDocFields;
m_dDocs[iDoc].m_fTFIDF = pDocL->m_fTFIDF + pDocR->m_fTFIDF;
iDoc++;
pDocL++;
pDocR++;
}
else if ( pDocL->m_tRowID<pDocR->m_tRowID )
m_dDocs[iDoc++] = *pDocL++;
else
pDocR++;
}
else
m_dDocs[iDoc++] = *pDocL++;
}
m_pDocL = pDocL;
m_pDocR = pDocR;
return ReturnDocsChunk ( iDoc, "maybe" );
}
void ExtMaybe_c::DebugDump ( int iLevel )
{
DebugDumpT ( "ExtMaybe", iLevel );
}
//////////////////////////////////////////////////////////////////////////
ExtAndNot_c::ExtAndNot_c ( ExtNode_i * pFirst, ExtNode_i * pSecond )
: ExtTwofer_c ( pFirst, pSecond )
{}
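// emits docs present in the accept-list (left) but absent from the
// reject-list (right); once the reject-list runs dry, the node degrades to
// a plain passthrough of the left stream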
const ExtDoc_t * ExtAndNot_c::GetDocsChunk()
{
// if reject-list is over, simply pass through to accept-list
if ( m_bPassthrough )
return m_pLeft->GetDocsChunk();
const ExtDoc_t * pDocL = m_pDocL;
const ExtDoc_t * pDocR = m_pDocR;
int iDoc = 0;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
if ( !WarmupDocs ( pDocL, m_pLeft.get() ) )
break;
WarmupDocs ( pDocR, m_pRight.get() );
// if there's nothing to filter against, simply copy leftovers
if ( !HasDocs(pDocR) )
{
while ( HasDocs(pDocL) && iDoc<MAX_BLOCK_DOCS-1 )
m_dDocs[iDoc++] = *pDocL++;
m_bPassthrough = !HasDocs(pDocL);
break;
}
// perform filtering
assert ( pDocL );
assert ( pDocR );
while (true)
{
assert ( iDoc<MAX_BLOCK_DOCS-1 );
assert ( HasDocs(pDocL) && HasDocs(pDocR) );
// copy accepted until min rejected id
while ( pDocL->m_tRowID < pDocR->m_tRowID && iDoc<MAX_BLOCK_DOCS-1 )
m_dDocs[iDoc++] = *pDocL++;
if ( !HasDocs(pDocL) || iDoc==MAX_BLOCK_DOCS-1 )
break;
// skip rejected until min accepted id
while ( pDocR->m_tRowID < pDocL->m_tRowID )
pDocR++;
if ( !HasDocs(pDocR) )
break;
// skip both while ids match
while ( pDocL->m_tRowID==pDocR->m_tRowID && HasDocs(pDocL) )
{
pDocL++;
pDocR++;
}
if ( !HasDocs(pDocL) || !HasDocs(pDocR) )
break;
}
}
m_pDocL = pDocL;
m_pDocR = pDocR;
return ReturnDocsChunk ( iDoc, "andnot" );
}
void ExtAndNot_c::CollectHits ( const ExtDoc_t * pDocs )
{
if ( !pDocs )
return;
const ExtHit_t * pHit = m_pLeft->GetHits(pDocs);
while ( HasHits(pHit) )
m_dHits.Add ( *pHit++ );
}
void ExtAndNot_c::Reset ( const ISphQwordSetup & tSetup )
{
m_bPassthrough = false;
ExtTwofer_c::Reset ( tSetup );
}
void ExtAndNot_c::SetCollectHits()
{
m_pLeft->SetCollectHits();
// m_pRight always ignores hits
}
void ExtAndNot_c::DebugDump ( int iLevel )
{
DebugDumpT ( "ExtAndNot", iLevel );
}
//////////////////////////////////////////////////////////////////////////
ExtNWay_c::ExtNWay_c ( const CSphVector<ExtNode_i *> & dNodes, const ISphQwordSetup & tSetup )
: ExtNode_c { tSetup.m_iMaxTimer }
{
assert ( dNodes.GetLength()>1 );
m_iAtomPos = dNodes[0]->GetAtomPos();
}
ExtNWay_c::~ExtNWay_c ()
{
SafeDelete ( m_pNode );
}
void ExtNWay_c::Reset ( const ISphQwordSetup & tSetup )
{
BufferedNode_c::Reset();
m_pNode->Reset ( tSetup );
m_pDocs = nullptr;
m_pHits = nullptr;
}
int ExtNWay_c::GetQwords ( ExtQwordsHash_t & hQwords )
{
assert ( m_pNode );
return m_pNode->GetQwords ( hQwords );
}
void ExtNWay_c::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
assert ( m_pNode );
m_pNode->SetQwordsIDF ( hQwords );
}
void ExtNWay_c::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
assert ( m_pNode );
m_pNode->GetTerms ( hQwords, dTermDupes );
}
void ExtNWay_c::HintRowID ( RowID_t tRowID )
{
m_pNode->HintRowID ( tRowID );
}
uint64_t ExtNWay_c::GetWordID() const
{
assert ( m_pNode );
return m_pNode->GetWordID();
}
inline void ExtNWay_c::ConstructNode ( const CSphVector<ExtNode_i *> & dNodes, const CSphVector<WORD> & dPositions, const ISphQwordSetup & tSetup )
{
assert ( m_pNode==NULL );
WORD uLPos = dPositions[0];
ExtNode_i * pCur = dNodes[uLPos++]; // ++ for zero-based to 1-based
ExtAnd_c * pCurEx = NULL;
DWORD uLeaves = dNodes.GetLength();
WORD uRPos;
for ( DWORD i=1; i<uLeaves; i++ )
{
uRPos = dPositions[i];
pCur = pCurEx = new ExtAnd_c ( pCur, dNodes[uRPos++] ); // ++ for zero-based to 1-based
pCurEx->SetNodePos ( uLPos, uRPos );
uLPos = 0;
}
if ( pCurEx )
pCurEx->SetQPosReverse();
pCur->SetCollectHits();
m_pNode = pCur;
}
//////////////////////////////////////////////////////////////////////////
template < class FSM >
ExtNWay_T<FSM>::ExtNWay_T ( const CSphVector<ExtNode_i *> & dNodes, const XQNode_t & tNode, const ISphQwordSetup & tSetup )
: ExtNWay_c ( dNodes, tSetup )
, FSM ( dNodes, tNode, tSetup )
{
CSphVector<WORD> dPositions ( dNodes.GetLength() );
ARRAY_FOREACH ( i, dPositions )
dPositions[i] = (WORD) i;
dPositions.Sort ( ExtNodeTFExt_fn ( dNodes ) );
ConstructNode ( dNodes, dPositions, tSetup );
}
template < class FSM >
const ExtDoc_t * ExtNWay_T<FSM>::GetDocsChunk()
{
if ( !WarmupDocs ( m_pDocs, m_pHits, m_pNode ) )
return nullptr;
const ExtDoc_t * pDoc = m_pDocs;
const ExtHit_t * pHit = m_pHits;
FSM::ResetFSM();
int iDoc = 0;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
assert ( pHit->m_tRowID==pDoc->m_tRowID );
FSM::ResetFSM();
// iterate all hits for this doc
while ( pHit->m_tRowID==pDoc->m_tRowID )
{
// emit the document if it's new and acceptable
if ( FSM::HitFSM ( pHit, m_dMyHits ) && ( !iDoc || pHit->m_tRowID!=m_dDocs[iDoc-1].m_tRowID ) )
{
m_dDocs[iDoc].m_tRowID = pHit->m_tRowID;
m_dDocs[iDoc].m_uDocFields = 1<< ( HITMAN::GetField ( pHit->m_uHitpos ) ); // not necessary
m_dDocs[iDoc].m_fTFIDF = pDoc->m_fTFIDF;
iDoc++;
}
pHit++;
}
pDoc++;
if ( !WarmupDocs ( pDoc, pHit, m_pNode ) )
break;
}
m_pDocs = pDoc;
m_pHits = pHit;
return ReturnDocsChunk ( iDoc, "nway" );
}
template < class FSM >
void ExtNWay_T<FSM>::CollectHits ( const ExtDoc_t * pDocs )
{
CopyMatchingHits ( m_dHits, pDocs );
}
template < class FSM >
void ExtNWay_T<FSM>::DebugDump ( int iLevel )
{
DebugIndent ( iLevel );
printf ( "%s\n", FSM::GetName() );
m_pNode->DebugDump ( iLevel+1 );
}
//////////////////////////////////////////////////////////////////////////
static DWORD GetQposMask ( const CSphVector<ExtNode_i *> & dQwords )
{
DWORD uQposMask = 0;
for ( const ExtNode_i * pNode : dQwords )
{
int iQpos = pNode->GetAtomPos();
// no more than 32 query terms can be checked; all others are skipped
if ( iQpos<0x1f )
uQposMask |= ( 1 << iQpos );
}
return uQposMask;
}
FSMphrase_c::FSMphrase_c ( const CSphVector<ExtNode_i *> & dQwords, const XQNode_t & , const ISphQwordSetup & tSetup )
: m_dAtomPos ( dQwords.GetLength() )
{
ARRAY_FOREACH ( i, dQwords )
m_dAtomPos[i] = dQwords[i]->GetAtomPos();
assert ( ( m_dAtomPos.Last()-m_dAtomPos[0]+1 )>0 );
m_dQposDelta.Resize ( m_dAtomPos.Last()-m_dAtomPos[0]+1 );
ARRAY_FOREACH ( i, m_dQposDelta )
m_dQposDelta[i] = -INT_MAX;
for ( int i=1; i<(int)m_dAtomPos.GetLength(); i++ )
m_dQposDelta [ dQwords[i-1]->GetAtomPos() - dQwords[0]->GetAtomPos() ] = dQwords[i]->GetAtomPos() - dQwords[i-1]->GetAtomPos();
if ( tSetup.m_bSetQposMask )
m_uQposMask = GetQposMask ( dQwords );
}
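// phrase FSM: each state records how many phrase words matched so far
// (m_iTagQword) and the hitpos where the next word is expected
// (m_uExpHitposWithField); a state that misses its expected position dies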
inline bool FSMphrase_c::HitFSM ( const ExtHit_t * pHit, CSphVector<ExtHit_t> & dHits )
{
DWORD uHitposWithField = HITMAN::GetPosWithField ( pHit->m_uHitpos );
// add a start state when the hit matches the first phrase word
if ( pHit->m_uQuerypos==m_dAtomPos[0] )
{
State_t & tState = m_dStates.Add();
tState.m_iTagQword = 0;
tState.m_uExpHitposWithField = uHitposWithField + m_dQposDelta[0];
}
// updating states
for ( int i=m_dStates.GetLength()-1; i>=0; i-- )
{
if ( m_dStates[i].m_uExpHitposWithField<uHitposWithField )
{
m_dStates.RemoveFast(i); // failed to match
continue;
}
// get next state
if ( m_dStates[i].m_uExpHitposWithField==uHitposWithField && m_dAtomPos [ m_dStates[i].m_iTagQword+1 ]==pHit->m_uQuerypos )
{
m_dStates[i].m_iTagQword++; // advance to the next element of the query
m_dStates[i].m_uExpHitposWithField = uHitposWithField + m_dQposDelta [ pHit->m_uQuerypos - m_dAtomPos[0] ];
}
// check whether the state has matched the whole phrase
if ( m_dStates[i].m_iTagQword==m_dAtomPos.GetLength()-1 )
{
DWORD uSpanlen = m_dAtomPos.Last() - m_dAtomPos[0];
ExtHit_t & tTarget = dHits.Add();
tTarget.m_tRowID = pHit->m_tRowID;
tTarget.m_uHitpos = uHitposWithField - uSpanlen;
tTarget.m_uQuerypos = (WORD) m_dAtomPos[0];
tTarget.m_uMatchlen = tTarget.m_uSpanlen = (WORD)( uSpanlen + 1 );
tTarget.m_uWeight = m_dAtomPos.GetLength();
tTarget.m_uQposMask = m_uQposMask;
ResetFSM ();
return true;
}
}
return false;
}
inline void FSMphrase_c::ResetFSM()
{
m_dStates.Resize(0);
}
//////////////////////////////////////////////////////////////////////////
FSMproximity_c::FSMproximity_c ( const CSphVector<ExtNode_i *> & dQwords, const XQNode_t & tNode, const ISphQwordSetup & tSetup )
: m_iMaxDistance ( tNode.m_iOpArg )
, m_uWordsExpected ( dQwords.GetLength() )
{
assert ( m_iMaxDistance>0 );
m_uMinQpos = dQwords[0]->GetAtomPos();
m_uQLen = dQwords.Last()->GetAtomPos() - m_uMinQpos;
m_dProx.Resize ( m_uQLen+1 );
m_dDeltas.Resize ( m_uQLen+1 );
if ( tSetup.m_bSetQposMask )
m_uQposMask = GetQposMask ( dQwords );
}
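// proximity FSM: m_dProx holds the latest hitpos per query word; a window
// of (query span + max distance) is kept around the leftmost word, and a
// match is emitted once all expected words fall inside that window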
inline bool FSMproximity_c::HitFSM ( const ExtHit_t * pHit, CSphVector<ExtHit_t> & dHits )
{
// walk through the hitlist and update context
int iQindex = pHit->m_uQuerypos - m_uMinQpos;
DWORD uHitposWithField = HITMAN::GetPosWithField ( pHit->m_uHitpos );
// check if the word is new
if ( m_dProx[iQindex]==UINT_MAX )
m_uWords++;
// update the context
m_dProx[iQindex] = uHitposWithField;
// check if the incoming hit is out of bounds, or affects min pos
if ( uHitposWithField>=m_uExpPos // out of expected bounds
|| iQindex==m_iMinQindex ) // or simply affects min pos
{
m_iMinQindex = iQindex;
int iMinPos = uHitposWithField - m_uQLen - m_iMaxDistance;
ARRAY_FOREACH ( i, m_dProx )
if ( m_dProx[i]!=UINT_MAX )
{
if ( (int)m_dProx[i]<=iMinPos )
{
m_dProx[i] = UINT_MAX;
m_uWords--;
continue;
}
if ( m_dProx[i]<uHitposWithField )
{
m_iMinQindex = i;
uHitposWithField = m_dProx[i];
}
}
m_uExpPos = m_dProx[m_iMinQindex] + m_uQLen + m_iMaxDistance;
}
// all words were found within given distance?
if ( m_uWords!=m_uWordsExpected )
return false;
// compute phrase weight
//
// FIXME! should also account for proximity factor, which is in 1 to maxdistance range:
// m_iMaxDistance - ( pHit->m_uHitpos - m_dProx[m_iMinQindex] - m_uQLen )
DWORD uMax = 0;
ARRAY_FOREACH ( i, m_dProx )
if ( m_dProx[i]!=UINT_MAX )
{
m_dDeltas[i] = m_dProx[i] - i;
uMax = Max ( uMax, m_dProx[i] );
} else
m_dDeltas[i] = INT_MAX;
m_dDeltas.Sort ();
DWORD uCurWeight = 0;
DWORD uWeight = 0;
int iLast = -INT_MAX;
ARRAY_FOREACH_COND ( i, m_dDeltas, m_dDeltas[i]!=INT_MAX )
{
if ( m_dDeltas[i]==iLast )
uCurWeight++;
else
{
uWeight += uCurWeight ? ( 1+uCurWeight ) : 0;
uCurWeight = 0;
}
iLast = m_dDeltas[i];
}
uWeight += uCurWeight ? ( 1+uCurWeight ) : 0;
if ( !uWeight )
uWeight = 1;
// emit hit
ExtHit_t & tTarget = dHits.Add();
tTarget.m_tRowID = pHit->m_tRowID;
tTarget.m_uHitpos = Hitpos_t ( m_dProx[m_iMinQindex] ); // !COMMIT strictly speaking this is creation from LCS not value
tTarget.m_uQuerypos = (WORD) m_uMinQpos;
tTarget.m_uSpanlen = tTarget.m_uMatchlen = (WORD)( uMax-m_dProx[m_iMinQindex]+1 );
tTarget.m_uWeight = uWeight;
tTarget.m_uQposMask = m_uQposMask;
// remove the current min, and force a recompute
m_dProx[m_iMinQindex] = UINT_MAX;
m_iMinQindex = -1;
m_uWords--;
m_uExpPos = 0;
return true;
}
inline void FSMproximity_c::ResetFSM()
{
m_uExpPos = 0;
m_uWords = 0;
m_iMinQindex = -1;
ARRAY_FOREACH ( i, m_dProx )
m_dProx[i] = UINT_MAX;
}
//////////////////////////////////////////////////////////////////////////
FSMmultinear_c::FSMmultinear_c ( const CSphVector<ExtNode_i *> & dNodes, const XQNode_t & tNode, const ISphQwordSetup & tSetup )
: m_iNear ( tNode.m_iOpArg )
, m_uWordsExpected ( dNodes.GetLength() )
, m_bQposMask ( tSetup.m_bSetQposMask )
{
if ( m_uWordsExpected==2 )
m_bTwofer = true;
else
{
m_dNpos.Reserve ( m_uWordsExpected );
m_dRing.Resize ( m_uWordsExpected );
m_bTwofer = false;
}
assert ( m_iNear>0 );
}
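// NEAR FSM: grows a chain of hits where each new hit begins within m_iNear
// positions of the previous hit's end; the generic (3+ word) case keeps a
// ring buffer with the last hit per node to track the current chain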
inline bool FSMmultinear_c::HitFSM ( const ExtHit_t * pHit, CSphVector<ExtHit_t> & dHits )
{
// walk through the hitlist and update context
DWORD uHitposWithField = HITMAN::GetPosWithField ( pHit->m_uHitpos );
WORD uNpos = pHit->m_uNodepos;
WORD uQpos = pHit->m_uQuerypos;
// skip dupe hit (may be emitted by OR node, for example)
if ( m_uLastP==uHitposWithField )
{
// let's choose the leftmost (in query) of all the dupes; the 'a NEAR/2 a' case
if ( m_bTwofer && uNpos<m_uFirstNpos )
{
m_uFirstQpos = uQpos;
m_uFirstNpos = uNpos;
return false;
} else if ( !m_bTwofer && uNpos<m_dRing [ RingTail() ].m_uNodepos ) // 'a NEAR/2 a NEAR/2 a' case
{
WORD * p = const_cast<WORD *>( m_dNpos.BinarySearch ( uNpos ) );
if ( !p )
{
p = const_cast<WORD *>( m_dNpos.BinarySearch ( m_dRing [ RingTail() ].m_uNodepos ) );
*p = uNpos;
m_dNpos.Sort();
m_dRing [ RingTail() ].m_uNodepos = uNpos;
m_dRing [ RingTail() ].m_uQuerypos = uQpos;
}
return false;
} else if ( m_uPrelastP && m_uLastML < pHit->m_uMatchlen ) // check if the hit is a subset of another one
{
// roll back to pre-last to check against this new hit
m_uLastML = m_uPrelastML;
m_uLastSL = m_uPrelastSL;
m_uFirstHit = m_uLastP = m_uPrelastP;
m_uWeight = m_uWeight - m_uLastW + m_uPrelastW;
} else
return false;
}
// probably new chain
if ( m_uLastP==0 || ( m_uLastP + m_uLastML + m_iNear )<=uHitposWithField )
{
m_uFirstHit = m_uLastP = uHitposWithField;
m_uLastML = pHit->m_uMatchlen;
m_uLastSL = pHit->m_uSpanlen;
m_uWeight = m_uLastW = pHit->m_uWeight;
m_uFirstQpos = uQpos;
if ( m_bTwofer )
{
m_uFirstNpos = uNpos;
} else
{
m_dNpos.Resize(1);
m_dNpos[0] = uNpos;
Add2Ring ( pHit );
}
return false;
}
// a hit with this querypos was already seen; skip it
if ( m_bTwofer )
{
// special case for twofer: hold the overlapping
if ( ( m_uFirstHit + m_uLastML )>uHitposWithField
&& ( m_uFirstHit + m_uLastML )<( uHitposWithField + pHit->m_uMatchlen )
&& m_uLastML!=pHit->m_uMatchlen )
{
m_uFirstHit = m_uLastP = uHitposWithField;
m_uLastML = pHit->m_uMatchlen;
m_uLastSL = pHit->m_uSpanlen;
m_uWeight = m_uLastW = pHit->m_uWeight;
m_uFirstQpos = uQpos;
m_uFirstNpos = uNpos;
return false;
}
if ( uNpos==m_uFirstNpos )
{
if ( m_uLastP < uHitposWithField )
{
m_uPrelastML = m_uLastML;
m_uPrelastSL = m_uLastSL;
m_uPrelastP = m_uLastP;
m_uPrelastW = pHit->m_uWeight;
m_uFirstHit = m_uLastP = uHitposWithField;
m_uLastML = pHit->m_uMatchlen;
m_uLastSL = pHit->m_uSpanlen;
m_uWeight = m_uLastW = m_uPrelastW;
m_uFirstQpos = uQpos;
m_uFirstNpos = uNpos;
}
return false;
}
} else
{
if ( uNpos < m_dNpos[0] )
{
m_uFirstQpos = Min ( m_uFirstQpos, uQpos );
m_dNpos.Insert ( 0, uNpos );
} else if ( uNpos > m_dNpos.Last() )
{
m_uFirstQpos = Min ( m_uFirstQpos, uQpos );
m_dNpos.Add ( uNpos );
} else if ( uNpos!=m_dNpos[0] && uNpos!=m_dNpos.Last() )
{
int iEnd = m_dNpos.GetLength();
int iStart = 0;
int iMid = -1;
while ( iEnd-iStart>1 )
{
iMid = ( iStart + iEnd ) / 2;
if ( uNpos==m_dNpos[iMid] )
{
const ExtHit_t& dHit = m_dRing[m_iRing];
// last addition same as the first. So, we can shift
if ( uNpos==dHit.m_uNodepos )
{
m_uWeight -= dHit.m_uWeight;
m_uFirstHit = HITMAN::GetPosWithField ( dHit.m_uHitpos );
ShiftRing();
// last addition same as the first. So, we can shift
} else if ( uNpos==m_dRing [ RingTail() ].m_uNodepos )
m_uWeight -= m_dRing [ RingTail() ].m_uWeight;
else
return false;
}
if ( uNpos<m_dNpos[iMid] )
iEnd = iMid;
else
iStart = iMid;
}
m_dNpos.Insert ( iEnd, uNpos );
m_uFirstQpos = Min ( m_uFirstQpos, uQpos );
// last addition same as the first. So, we can shift
} else if ( uNpos==m_dRing[m_iRing].m_uNodepos )
{
m_uWeight -= m_dRing[m_iRing].m_uWeight;
m_uFirstHit = HITMAN::GetPosWithField ( m_dRing[m_iRing].m_uHitpos );
ShiftRing();
// last addition same as the tail. So, we can move the tail onto it.
} else if ( uNpos==m_dRing [ RingTail() ].m_uNodepos )
m_uWeight -= m_dRing [ RingTail() ].m_uWeight;
else
return false;
}
m_uWeight += pHit->m_uWeight;
m_uLastML = pHit->m_uMatchlen;
m_uLastSL = pHit->m_uSpanlen;
Add2Ring ( pHit );
// finally got the whole chain - emit it!
// warning: we don't support overlapping in generic chains.
if ( m_bTwofer || (int)m_uWordsExpected==m_dNpos.GetLength() )
{
ExtHit_t & tTarget = dHits.Add();
tTarget.m_tRowID = pHit->m_tRowID;
tTarget.m_uHitpos = Hitpos_t ( m_uFirstHit ); // !COMMIT strictly speaking this is creation from LCS not value
tTarget.m_uMatchlen = (WORD)( uHitposWithField - m_uFirstHit + m_uLastML );
tTarget.m_uWeight = m_uWeight;
m_uPrelastP = 0;
if ( m_bTwofer ) // for exactly 2 words allow overlapping - so, just shift the chain, not reset it
{
tTarget.m_uQuerypos = Min ( m_uFirstQpos, pHit->m_uQuerypos );
tTarget.m_uSpanlen = 2;
tTarget.m_uQposMask = ( 1 << ( Max ( m_uFirstQpos, pHit->m_uQuerypos ) - tTarget.m_uQuerypos ) );
m_uFirstHit = m_uLastP = uHitposWithField;
m_uWeight = pHit->m_uWeight;
m_uFirstQpos = pHit->m_uQuerypos;
} else
{
tTarget.m_uQuerypos = Min ( m_uFirstQpos, pHit->m_uQuerypos );
tTarget.m_uSpanlen = (WORD) m_dNpos.GetLength();
tTarget.m_uQposMask = 0;
m_uLastP = 0;
if ( m_bQposMask && tTarget.m_uSpanlen>1 )
{
int iNpos0 = m_dNpos[0];
ARRAY_FOREACH ( i, m_dNpos )
{
int iQposDelta = ( m_dNpos[i] - iNpos0 ) + tTarget.m_uQuerypos;
assert ( iQposDelta<(int)sizeof(tTarget.m_uQposMask)*8 );
tTarget.m_uQposMask |= ( 1 << iQposDelta );
}
}
}
return true;
}
m_uLastP = uHitposWithField;
return false;
}
inline void FSMmultinear_c::ResetFSM()
{
m_iRing = m_uLastP = m_uPrelastP = 0;
}
inline int FSMmultinear_c::RingTail() const
{
return ( m_iRing + m_dNpos.GetLength() - 1 ) % m_uWordsExpected;
}
inline void FSMmultinear_c::Add2Ring ( const ExtHit_t* pHit )
{
if ( !m_bTwofer )
m_dRing [ RingTail() ] = *pHit;
}
inline void FSMmultinear_c::ShiftRing()
{
if ( ++m_iRing==(int)m_uWordsExpected )
m_iRing=0;
}
//////////////////////////////////////////////////////////////////////////
struct QuorumDupeNodeHash_t
{
uint64_t m_uWordID;
int m_iIndex;
bool operator < ( const QuorumDupeNodeHash_t & b ) const
{
if ( m_uWordID==b.m_uWordID )
return m_iIndex<b.m_iIndex;
else
return m_uWordID<b.m_uWordID;
}
};
struct QuorumNodeAtomPos_fn
{
inline bool IsLess ( const ExtQuorum_c::TermTuple_t & a, const ExtQuorum_c::TermTuple_t & b ) const
{
return a.m_pTerm->GetAtomPos() < b.m_pTerm->GetAtomPos();
}
};
ExtQuorum_c::ExtQuorum_c ( CSphVector<ExtNode_i*> & dQwords, const XQNode_t & tNode, const ISphQwordSetup & tSetup )
: ExtNode_c ( tSetup.m_iMaxTimer)
{
assert ( tNode.GetOp()==SPH_QUERY_QUORUM );
assert ( dQwords.GetLength()<MAX_HITS );
m_iThresh = GetThreshold ( tNode, dQwords.GetLength() );
m_iThresh = Max ( m_iThresh, 1 );
m_bHasDupes = false;
assert ( dQwords.GetLength()>1 ); // use TERM instead
assert ( dQwords.GetLength()<=256 ); // internal masks are up to 256 bits
assert ( m_iThresh>=1 ); // 1 is also OK; it's a bit different from just OR
assert ( m_iThresh<dQwords.GetLength() ); // use AND instead
if ( dQwords.GetLength()>0 )
{
m_iAtomPos = dQwords[0]->GetAtomPos();
// compute duplicate keywords mask (aka dupe mask)
// FIXME! will fail with wordforms and stuff; sorry, no wordforms vs expand vs quorum support for now!
CSphFixedVector<QuorumDupeNodeHash_t> dHashes ( dQwords.GetLength() );
ARRAY_FOREACH ( i, dQwords )
{
dHashes[i].m_uWordID = dQwords[i]->GetWordID();
dHashes[i].m_iIndex = i;
}
sphSort ( dHashes.Begin(), dHashes.GetLength() );
QuorumDupeNodeHash_t tParent = *dHashes.Begin();
m_dInitialChildren.Add().m_pTerm = dQwords[tParent.m_iIndex];
m_dInitialChildren.Last().m_iCount = 1;
tParent.m_iIndex = 0;
for ( int i=1; i<dHashes.GetLength(); i++ )
{
QuorumDupeNodeHash_t & tElem = dHashes[i];
if ( tParent.m_uWordID!=tElem.m_uWordID )
{
tParent = tElem;
tParent.m_iIndex = m_dInitialChildren.GetLength();
m_dInitialChildren.Add().m_pTerm = dQwords [ tElem.m_iIndex ];
m_dInitialChildren.Last().m_iCount = 1;
} else
{
m_dInitialChildren[tParent.m_iIndex].m_iCount++;
SafeDelete ( dQwords[tElem.m_iIndex] );
m_bHasDupes = true;
}
}
// sort back to qpos order
m_dInitialChildren.Sort ( QuorumNodeAtomPos_fn() );
}
ARRAY_FOREACH ( i, m_dInitialChildren )
{
m_dInitialChildren[i].m_pCurDoc = NULL;
m_dInitialChildren[i].m_pCurHit = NULL;
m_dInitialChildren[i].m_pTerm->SetCollectHits();
}
m_dChildren = m_dInitialChildren;
}
ExtQuorum_c::~ExtQuorum_c ()
{
ARRAY_FOREACH ( i, m_dInitialChildren )
SafeDelete ( m_dInitialChildren[i].m_pTerm );
}
void ExtQuorum_c::Reset ( const ISphQwordSetup & tSetup )
{
BufferedNode_c::Reset();
m_dChildren = m_dInitialChildren;
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i].m_pTerm->Reset ( tSetup );
}
int ExtQuorum_c::GetQwords ( ExtQwordsHash_t & hQwords )
{
int iMax = -1;
ARRAY_FOREACH ( i, m_dChildren )
{
int iKidMax = m_dChildren[i].m_pTerm->GetQwords ( hQwords );
iMax = Max ( iMax, iKidMax );
}
return iMax;
}
void ExtQuorum_c::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i].m_pTerm->SetQwordsIDF ( hQwords );
}
void ExtQuorum_c::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i].m_pTerm->GetTerms ( hQwords, dTermDupes );
}
uint64_t ExtQuorum_c::GetWordID() const
{
uint64_t uHash = SPH_FNV64_SEED;
ARRAY_FOREACH ( i, m_dChildren )
{
uint64_t uCur = m_dChildren[i].m_pTerm->GetWordID();
uHash = sphFNV64 ( &uCur, sizeof(uCur), uHash );
}
return uHash;
}
void ExtQuorum_c::HintRowID ( RowID_t tRowID )
{
for ( auto & i : m_dChildren )
if ( i.m_pTerm )
i.m_pTerm->HintRowID ( tRowID );
}
NodeEstimate_t ExtQuorum_c::Estimate ( int64_t iTotalDocs ) const
{
NodeEstimate_t tEst;
for ( auto & i : m_dChildren )
if ( i.m_pTerm )
tEst += i.m_pTerm->Estimate(iTotalDocs);
return tEst;
}
void ExtQuorum_c::SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries )
{
for ( auto & i : m_dChildren )
if ( i.m_pTerm )
i.m_pTerm->SetRowidBoundaries(tBoundaries);
}
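// candidate = the min rowid across the children; iQuorum counts how many
// children (weighted by their dupe counts) matched it, and the doc is
// emitted once that count reaches the threshold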
const ExtDoc_t * ExtQuorum_c::GetDocsChunk()
{
// warmup
ARRAY_FOREACH ( i, m_dChildren )
{
TermTuple_t & tElem = m_dChildren[i];
if ( HasDocs(tElem.m_pCurDoc) )
continue;
tElem.m_pCurDoc = tElem.m_pTerm->GetDocsChunk();
if ( tElem.m_pCurDoc )
tElem.m_pCurHit = tElem.m_pTerm->GetHits ( tElem.m_pCurDoc );
else
{
m_dChildren.RemoveFast(i);
i--;
}
}
// main loop
int iDoc = 0;
int iQuorumLeft = CountQuorum ( true );
while ( iDoc<MAX_BLOCK_DOCS-1 && iQuorumLeft>=m_iThresh )
{
// find min document ID, count occurrences
ExtDoc_t tCand;
tCand.m_tRowID = INVALID_ROWID; // current candidate id
tCand.m_uDocFields = 0; // not necessary
tCand.m_fTFIDF = 0.0f;
int iQuorum = 0;
for ( auto & tChild : m_dChildren )
{
assert ( HasDocs ( tChild.m_pCurDoc ) );
if ( tChild.m_pCurDoc->m_tRowID < tCand.m_tRowID )
{
tCand = *tChild.m_pCurDoc;
iQuorum = tChild.m_iCount;
}
else if ( tChild.m_pCurDoc->m_tRowID==tCand.m_tRowID )
{
tCand.m_uDocFields |= tChild.m_pCurDoc->m_uDocFields; // FIXME!!! check hits in case of dupes or field constraints
tCand.m_fTFIDF += tChild.m_pCurDoc->m_fTFIDF;
iQuorum += tChild.m_iCount;
}
}
if ( iQuorum>=m_iThresh && CollectMatchingHits ( tCand.m_tRowID, m_iThresh ) )
m_dDocs[iDoc++] = tCand;
// advance children
int iNumChildren = m_dChildren.GetLength();
ARRAY_FOREACH ( i, m_dChildren )
{
TermTuple_t & tElem = m_dChildren[i];
if ( tElem.m_pCurDoc->m_tRowID!=tCand.m_tRowID )
continue;
tElem.m_pCurDoc++;
if ( HasDocs(tElem.m_pCurDoc) )
continue;
tElem.m_pCurDoc = tElem.m_pTerm->GetDocsChunk();
if ( tElem.m_pCurDoc )
tElem.m_pCurHit = tElem.m_pTerm->GetHits ( tElem.m_pCurDoc );
else
{
m_dChildren.RemoveFast ( i );
i--;
}
}
if ( iNumChildren!=m_dChildren.GetLength() )
iQuorumLeft = CountQuorum ( false );
}
return ReturnDocsChunk ( iDoc, "quorum" );
}
struct QuorumCmpHitPos_fn
{
inline bool IsLess ( const ExtHit_t & a, const ExtHit_t & b ) const
{
if ( a.m_tRowID==b.m_tRowID )
{
DWORD uHitPosA = HITMAN::GetPosWithField(a.m_uHitpos);
DWORD uHitPosB = HITMAN::GetPosWithField(b.m_uHitpos);
if ( uHitPosA==uHitPosB )
{
if ( a.m_uQuerypos==b.m_uQuerypos )
return HITMAN::IsEnd ( a.m_uHitpos ) < HITMAN::IsEnd ( b.m_uHitpos );
else
return ( a.m_uQuerypos<b.m_uQuerypos );
}
return uHitPosA<uHitPosB;
}
return a.m_tRowID<b.m_tRowID;
}
};
void ExtQuorum_c::CollectHits ( const ExtDoc_t * pDocs )
{
CopyMatchingHits ( m_dHits, pDocs );
m_dHits.Sort ( QuorumCmpHitPos_fn() );
}
int ExtQuorum_c::CountQuorum ( bool bFixDupes )
{
if ( !m_bHasDupes )
return m_dChildren.GetLength();
int iSum = 0;
bool bHasDupes = false;
ARRAY_FOREACH ( i, m_dChildren )
{
iSum += m_dChildren[i].m_iCount;
bHasDupes |= ( m_dChildren[i].m_iCount>1 );
}
#if QDEBUG
if ( bFixDupes && bHasDupes!=m_bHasDupes )
printf ( "quorum dupes %d -> %d\n", m_bHasDupes, bHasDupes );
#endif
m_bHasDupes = bFixDupes ? bHasDupes : m_bHasDupes;
return iSum;
}
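// for the percent form this rounds iOpArg% of the keyword count to the
// nearest integer, e.g. a 50% quorum over 4 keywords yields
// floor ( 0.5f*4 + 0.5f ) = 2; otherwise iOpArg is the threshold itself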
int ExtQuorum_c::GetThreshold ( const XQNode_t & tNode, int iQwords )
{
return ( tNode.m_bPercentOp ? (int)floor ( 1.0f / 100.0f * tNode.m_iOpArg * iQwords + 0.5f ) : tNode.m_iOpArg );
}
bool ExtQuorum_c::CollectMatchingHits ( RowID_t tRowID, int iThreshold )
{
if ( !m_bHasDupes )
{
for ( auto & tChild : m_dChildren )
{
while ( tChild.m_pCurHit->m_tRowID < tRowID )
tChild.m_pCurHit++;
while ( tChild.m_pCurHit->m_tRowID==tRowID )
m_dMyHits.Add ( *tChild.m_pCurHit++ );
}
return true;
}
int iOldHitLen = m_dMyHits.GetLength();
int iQuorum = 0;
for ( auto & tChild : m_dChildren )
{
const ExtHit_t * & pHit = tChild.m_pCurHit;
if ( !HasHits(pHit) )
continue;
while ( pHit->m_tRowID<tRowID )
pHit++;
// collect matching hits, but only up to the per-term dupe count
for ( int iTermHits = 0; iTermHits<tChild.m_iCount && pHit->m_tRowID==tRowID; iTermHits++, iQuorum++ )
m_dMyHits.Add ( *pHit++ );
// got quorum - no need to check further
if ( iQuorum>=iThreshold )
break;
}
// discard the collected hits if the quorum was not reached
if ( iQuorum<iThreshold )
{
m_dMyHits.Resize ( iOldHitLen );
return false;
}
// collect all remaining hits at this rowid so the doc/hit streams can be advanced
for ( auto & tChild : m_dChildren )
{
while ( tChild.m_pCurHit->m_tRowID==tRowID )
m_dMyHits.Add ( *tChild.m_pCurHit++ );
}
return true;
}
//////////////////////////////////////////////////////////////////////////
ExtOrder_c::ExtOrder_c ( const CSphVector<ExtNode_i *> & dChildren, const ISphQwordSetup & tSetup )
: ExtNode_c { tSetup.m_iMaxTimer }
, m_dChildren ( dChildren )
, m_bDone ( false )
{
int iChildren = dChildren.GetLength();
assert ( iChildren>=2 );
m_dChildDoc.Resize ( iChildren );
m_dChildHit.Resize ( iChildren );
m_dChildDocsChunk.Resize ( iChildren );
if ( dChildren.GetLength()>0 )
m_iAtomPos = dChildren[0]->GetAtomPos();
ARRAY_FOREACH ( i, dChildren )
{
assert ( m_dChildren[i] );
m_dChildDoc[i] = nullptr;
m_dChildHit[i] = nullptr;
m_dChildren[i]->SetCollectHits();
}
}
void ExtOrder_c::Reset ( const ISphQwordSetup & tSetup )
{
m_bDone = false;
m_dMyHits.Resize(0);
m_dChildDoc.Fill(nullptr);
m_dChildHit.Fill(nullptr);
for ( auto pChild : m_dChildren )
{
assert(pChild);
pChild->Reset(tSetup);
}
}
ExtOrder_c::~ExtOrder_c()
{
for ( auto & pChild : m_dChildren )
SafeDelete ( pChild );
}
// rewinds all child hitlists to the given rowid
// and returns the index of the child with the min hitpos (or -1 if none match)
int ExtOrder_c::GetChildIdWithNextHit ( RowID_t tRowID )
{
// OPTIMIZE! implement PQ instead of full-scan
DWORD uMinPosWithField = UINT_MAX;
int iChild = -1;
ARRAY_FOREACH ( i, m_dChildren )
{
const ExtHit_t * & pHit = m_dChildHit[i];
// skip until proper hit
while ( pHit->m_tRowID<tRowID )
pHit++;
// is this our man at all?
if ( pHit->m_tRowID==tRowID )
{
// is he the best we can get?
if ( HITMAN::GetPosWithField ( pHit->m_uHitpos ) < uMinPosWithField )
{
uMinPosWithField = HITMAN::GetPosWithField ( pHit->m_uHitpos );
iChild = i;
}
}
}
return iChild;
}
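// checks whether the doc contains the child hits in the required order:
// dAccLongest tracks the longest in-order prefix found so far, dAccRecent a
// freshly restarted one, so overlapping candidate subsequences both survive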
bool ExtOrder_c::GetMatchingHits ( RowID_t tRowID )
{
// my trackers
CSphVector<ExtHit_t> dAccLongest;
CSphVector<ExtHit_t> dAccRecent;
int iPosLongest = 0; // needed to handle cases such as "a b c" << a
int iPosRecent = 0;
int iField = -1;
dAccLongest.Reserve ( m_dChildren.GetLength() );
dAccRecent.Reserve ( m_dChildren.GetLength() );
int nOldHits = m_dMyHits.GetLength();
while ( true )
{
// get next hit (in hitpos ascending order)
int iChild = GetChildIdWithNextHit ( tRowID );
if ( iChild<0 )
break; // OPTIMIZE? no trailing hits on this route
const ExtHit_t * & pHit = m_dChildHit[iChild];
assert ( pHit->m_tRowID==tRowID );
// most recent subseq must never be longer
assert ( dAccRecent.GetLength()<=dAccLongest.GetLength() );
// handle that hit!
int iHitField = HITMAN::GetField ( pHit->m_uHitpos );
int iHitPos = HITMAN::GetPos ( pHit->m_uHitpos );
if ( iHitField!=iField )
{
// new field; reset both trackers
dAccLongest.Resize ( 0 );
dAccRecent.Resize ( 0 );
// initial seeding, if needed
if ( iChild==0 )
{
dAccLongest.Add ( *pHit );
iPosLongest = iHitPos + pHit->m_uSpanlen;
iField = iHitField;
}
} else if ( iChild==dAccLongest.GetLength() && iHitPos>=iPosLongest )
{
// it fits longest tracker
dAccLongest.Add ( *pHit );
iPosLongest = iHitPos + pHit->m_uSpanlen;
// fully matched subsequence
if ( dAccLongest.GetLength()==m_dChildren.GetLength() )
{
// flush longest tracker into buffer, and keep it terminated
ARRAY_FOREACH ( i, dAccLongest )
m_dMyHits.Add ( dAccLongest[i] );
// reset both trackers
dAccLongest.Resize ( 0 );
dAccRecent.Resize ( 0 );
iPosRecent = iPosLongest;
}
} else if ( iChild==0 )
{
// it restarts most-recent tracker
dAccRecent.Resize ( 0 );
dAccRecent.Add ( *pHit );
iPosRecent = iHitPos + pHit->m_uSpanlen;
if ( !dAccLongest.GetLength() )
{
dAccLongest.Add ( *pHit );
iPosLongest = iHitPos + pHit->m_uSpanlen;
}
} else if ( iChild==dAccRecent.GetLength() && iHitPos>=iPosRecent )
{
// it fits most-recent tracker
dAccRecent.Add ( *pHit );
iPosRecent = iHitPos + pHit->m_uSpanlen;
// maybe most-recent just became longest too?
if ( dAccRecent.GetLength()==dAccLongest.GetLength() )
{
dAccLongest.SwapData ( dAccRecent );
dAccRecent.Resize ( 0 );
iPosLongest = iPosRecent;
}
}
// advance hit stream
pHit++;
}
return nOldHits!=m_dMyHits.GetLength();
}
const ExtDoc_t * ExtOrder_c::GetDocsChunk()
{
if ( m_bDone )
return nullptr;
// warm up
ARRAY_FOREACH ( i, m_dChildren )
{
if ( !m_dChildDoc[i] )
{
m_dChildDoc[i] = m_dChildDocsChunk[i] = m_dChildren[i]->GetDocsChunk();
m_dChildHit[i] = nullptr;
}
if ( !m_dChildDoc[i] )
{
m_bDone = true;
return nullptr;
}
}
// match while there's enough space in buffer
int iDoc = 0;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
// find next candidate document (that has all the words)
RowID_t tRowID = m_dChildDoc[0]->m_tRowID;
assert ( tRowID!=INVALID_ROWID );
int iChild = 1;
while ( iChild < m_dChildren.GetLength() )
{
// skip docs with too small ids
assert ( m_dChildDoc[iChild] );
while ( m_dChildDoc[iChild]->m_tRowID < tRowID )
m_dChildDoc[iChild]++;
// block end? pull next block and keep scanning
if ( !HasDocs ( m_dChildDoc[iChild] ) )
{
m_dChildDoc[iChild] = m_dChildDocsChunk[iChild] = m_dChildren[iChild]->GetDocsChunk();
m_dChildHit[iChild] = nullptr;
if ( !m_dChildDoc[iChild] )
{
m_bDone = true;
return ReturnDocsChunk ( iDoc, "order" );
}
continue;
}
// too big an id? then it's our next candidate
if ( m_dChildDoc[iChild]->m_tRowID > tRowID )
{
tRowID = m_dChildDoc[iChild]->m_tRowID;
iChild = 0;
continue;
}
assert ( m_dChildDoc[iChild]->m_tRowID==tRowID );
iChild++;
}
#ifndef NDEBUG
assert ( tRowID!=INVALID_ROWID );
for ( auto pChildDoc : m_dChildDoc )
{
assert ( pChildDoc );
assert ( pChildDoc->m_tRowID==tRowID );
}
#endif
// fetch hits
ARRAY_FOREACH ( i, m_dChildren )
{
if ( !m_dChildHit[i] )
m_dChildHit[i] = m_dChildren[i]->GetHits ( m_dChildDoc[i] );
}
// match and save hits
if ( GetMatchingHits ( tRowID ) )
m_dDocs[iDoc++] = *m_dChildDoc[0];
// advance doc stream
m_dChildDoc[0]++;
if ( !HasDocs ( m_dChildDoc[0] ) )
{
m_dChildDoc[0] = m_dChildDocsChunk[0] = m_dChildren[0]->GetDocsChunk();
m_dChildHit[0] = nullptr;
if ( !m_dChildDoc[0] )
{
m_bDone = true;
break;
}
}
}
return ReturnDocsChunk ( iDoc, "order" );
}
void ExtOrder_c::CollectHits ( const ExtDoc_t * pDocs )
{
CopyMatchingHits ( m_dHits, pDocs );
PrintHitsChunk ( m_dHits.GetLength(), m_iAtomPos, m_dHits.Begin(), this );
}
int ExtOrder_c::GetQwords ( ExtQwordsHash_t & hQwords )
{
int iMax = -1;
ARRAY_FOREACH ( i, m_dChildren )
{
int iKidMax = m_dChildren[i]->GetQwords ( hQwords );
iMax = Max ( iMax, iKidMax );
}
return iMax;
}
void ExtOrder_c::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i]->SetQwordsIDF ( hQwords );
}
void ExtOrder_c::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i]->GetTerms ( hQwords, dTermDupes );
}
uint64_t ExtOrder_c::GetWordID () const
{
uint64_t uHash = SPH_FNV64_SEED;
ARRAY_FOREACH ( i, m_dChildren )
{
uint64_t uCur = m_dChildren[i]->GetWordID();
uHash = sphFNV64 ( &uCur, sizeof(uCur), uHash );
}
return uHash;
}
void ExtOrder_c::HintRowID ( RowID_t tRowID )
{
for ( auto i : m_dChildren )
i->HintRowID ( tRowID );
}
NodeEstimate_t ExtOrder_c::Estimate ( int64_t iTotalDocs ) const
{
NodeEstimate_t tEst;
for ( const auto & i : m_dChildren )
tEst += i->Estimate(iTotalDocs);
return tEst;
}
void ExtOrder_c::SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries )
{
for ( auto & i : m_dChildren )
i->SetRowidBoundaries(tBoundaries);
}
//////////////////////////////////////////////////////////////////////////
template<bool ROWID_LIMITS>
ExtUnit_T<ROWID_LIMITS>::ExtUnit_T ( ExtNode_i * pFirst, ExtNode_i * pSecond, const FieldMask_t & uFields, const ISphQwordSetup & tSetup, const char * szUnit )
: ExtNode_c { tSetup.m_iMaxTimer }
, m_pArg1 ( pFirst )
, m_pArg2 ( pSecond )
{
XQKeyword_t tDot;
tDot.m_sWord = szUnit;
if ( tSetup.m_pStats )
m_pDot = new ExtTerm_T<false,ROWID_LIMITS,true> ( CreateQueryWord ( tDot, tSetup ), uFields, tSetup, true );
else
m_pDot = new ExtTerm_T<false,ROWID_LIMITS,false> ( CreateQueryWord ( tDot, tSetup ), uFields, tSetup, true );
m_pArg1->SetCollectHits();
m_pArg2->SetCollectHits();
m_pDot->SetCollectHits();
}
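// Note on the ctor above (descriptive, an assumption about the magic token):
// the "dot" node reads szUnit as a regular keyword, i.e. the special
// sentence/paragraph boundary marker indexed alongside ordinary words, so
// boundary positions stream through the very same ExtTerm_T hit machinery.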
template<bool ROWID_LIMITS>
ExtUnit_T<ROWID_LIMITS>::~ExtUnit_T ()
{
SafeDelete ( m_pArg1 );
SafeDelete ( m_pArg2 );
SafeDelete ( m_pDot );
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::Reset ( const ISphQwordSetup & tSetup )
{
m_pArg1->Reset ( tSetup );
m_pArg2->Reset ( tSetup );
m_pDot->Reset ( tSetup );
m_pDocs1 = m_pDocs2 = m_pDotDocs = nullptr;
m_pDoc1 = m_pDoc2 = m_pDotDoc = nullptr;
m_bNeedDotHits = false;
BufferedNode_c::Reset();
}
template<bool ROWID_LIMITS>
int ExtUnit_T<ROWID_LIMITS>::GetQwords ( ExtQwordsHash_t & hQwords )
{
int iMax1 = m_pArg1->GetQwords ( hQwords );
int iMax2 = m_pArg2->GetQwords ( hQwords );
return Max ( iMax1, iMax2 );
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
m_pArg1->SetQwordsIDF ( hQwords );
m_pArg2->SetQwordsIDF ( hQwords );
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
m_pArg1->GetTerms ( hQwords, dTermDupes );
m_pArg2->GetTerms ( hQwords, dTermDupes );
}
template<bool ROWID_LIMITS>
uint64_t ExtUnit_T<ROWID_LIMITS>::GetWordID() const
{
uint64_t dHash[2];
dHash[0] = m_pArg1->GetWordID();
dHash[1] = m_pArg2->GetWordID();
return sphFNV64 ( dHash, sizeof(dHash) );
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::HintRowID ( RowID_t tRowID )
{
m_pArg1->HintRowID ( tRowID );
m_pArg2->HintRowID ( tRowID );
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::DebugDump ( int iLevel )
{
DebugIndent ( iLevel );
printf ( "ExtSentence\n" );
m_pArg1->DebugDump ( iLevel+1 );
m_pArg2->DebugDump ( iLevel+1 );
}
template<bool ROWID_LIMITS>
NodeEstimate_t ExtUnit_T<ROWID_LIMITS>::Estimate ( int64_t iTotalDocs ) const
{
assert ( m_pArg1 && m_pArg2 && m_pDot );
NodeEstimate_t tRes;
tRes += m_pArg1->Estimate(iTotalDocs);
tRes += m_pArg2->Estimate(iTotalDocs);
tRes += m_pDot->Estimate(iTotalDocs);
return tRes;
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries )
{
m_pArg1->SetRowidBoundaries(tBoundaries);
m_pArg2->SetRowidBoundaries(tBoundaries);
m_pDot->SetRowidBoundaries(tBoundaries);
}
/// skips hits within current document while their position is less or equal than the given limit
/// returns true if a matching hit (with big enough position, and in current document) was found
/// returns false otherwise
static inline bool SkipHitsLtePos ( const ExtHit_t * & pHits, Hitpos_t uPos )
{
assert ( pHits );
RowID_t tRowID = pHits->m_tRowID;
if ( tRowID==INVALID_ROWID )
return false;
while ( pHits->m_tRowID==tRowID && pHits->m_uHitpos<=uPos )
pHits++;
return pHits->m_tRowID==tRowID;
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::FilterHits ( const ExtDoc_t * pDoc1, const ExtDoc_t * pDoc2, const ExtHit_t * & pHit1, const ExtHit_t * & pHit2, const ExtHit_t * & pDotHit, DWORD uSentenceEnd, RowID_t tRowID, int & iDoc )
{
bool bRegistered = false;
while ( true )
{
if ( uSentenceEnd )
{
// we're in a matched sentence state
// copy hits until next dot
bool bValid1 = pHit1->m_tRowID==tRowID && pHit1->m_uHitpos<uSentenceEnd;
bool bValid2 = pHit2->m_tRowID==tRowID && pHit2->m_uHitpos<uSentenceEnd;
if ( !bValid1 && !bValid2 )
{
// no more hits in this sentence
uSentenceEnd = 0;
if ( pHit1->m_tRowID==tRowID && pHit2->m_tRowID==tRowID )
continue; // no more in-sentence hits, but perhaps more sentences in this document
else
break; // document is over
}
// register document as matching
if ( !bRegistered )
{
ExtDoc_t & tDoc = m_dDocs[iDoc++];
tDoc.m_tRowID = pDoc1->m_tRowID;
				tDoc.m_uDocFields = pDoc1->m_uDocFields | pDoc2->m_uDocFields; // not strictly necessary
tDoc.m_fTFIDF = pDoc1->m_fTFIDF + pDoc2->m_fTFIDF;
bRegistered = true; // just once
}
if ( bValid1 && ( !bValid2 || IsHitLess ( pHit1, pHit2 ) ) )
m_dMyHits.Add ( *pHit1++ );
else
m_dMyHits.Add ( *pHit2++ );
}
else
{
// no sentence matched yet
// let's check the next hit pair
assert ( pHit1->m_tRowID==tRowID );
assert ( pHit2->m_tRowID==tRowID );
assert ( pDotHit->m_tRowID==tRowID );
// our current hit pair locations
DWORD uMin = Min ( pHit1->m_uHitpos, pHit2->m_uHitpos );
DWORD uMax = Max ( pHit1->m_uHitpos, pHit2->m_uHitpos );
// skip all dots beyond the min location
if ( !SkipHitsLtePos ( pDotHit, uMin ) )
{
// we have a match!
// moreover, no more dots past min location in current document
// copy hits until next document
uSentenceEnd = UINT_MAX;
continue;
}
// does the first post-pair-start dot separate our hit pair?
if ( pDotHit->m_uHitpos < uMax )
{
// yes, got an "A dot B" case
// rewind candidate hits past this dot, break if current document is over
if ( !SkipHitsLtePos ( pHit1, pDotHit->m_uHitpos ) )
break;
if ( !SkipHitsLtePos ( pHit2, pDotHit->m_uHitpos ) )
break;
continue;
}
else
{
// we have a match!
// copy hits until next dot
if ( !SkipHitsLtePos ( pDotHit, uMax ) )
uSentenceEnd = UINT_MAX; // correction, no next dot, so make it "next document"
else
uSentenceEnd = pDotHit->m_uHitpos;
assert ( uSentenceEnd );
}
}
}
}
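// Worked example for FilterHits (illustration only): within one document, let
// arg1 hit at positions {3, 9}, arg2 at {5}, and a dot sit at {7}. For the
// pair (3,5) the first dot past min(3,5) is 7, which is not below max(3,5),
// so the pair matches and hits are copied until the dot at 7 ends the
// sentence; the later position 9 is separated from 5 by that dot and never
// pairs up.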
template<bool ROWID_LIMITS>
const ExtDoc_t * ExtUnit_T<ROWID_LIMITS>::GetDocsChunk()
{
// SENTENCE operator is essentially AND on steroids
// that also takes relative dot positions into account
//
// when document matches both args but not the dot, it degenerates into AND
// we immediately lookup and copy matching document hits anyway, though
// this is suboptimal (because these hits might never be required at all)
	// but this is expected to be a rare case, so let's keep the code simple
//
// when document matches both args and the dot, we need to filter the hits
// only those left/right pairs that are not (!) separated by a dot should match
int iDoc = 0;
const ExtHit_t * pHit1 = m_pHit1;
const ExtHit_t * pHit2 = m_pHit2;
const ExtHit_t * pDotHit = m_pDotHit;
const ExtDoc_t * pDoc1 = m_pDoc1;
const ExtDoc_t * pDoc2 = m_pDoc2;
const ExtDoc_t * pDotDoc = m_pDotDoc;
bool bNeedDoc1Hits = false;
bool bNeedDoc2Hits = false;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
// fetch more candidate docs, if needed
if ( !HasDocs(pDoc1) )
{
if ( HasDocs(pDoc2) )
m_pArg1->HintRowID ( pDoc2->m_tRowID );
pDoc1 = m_pArg1->GetDocsChunk();
if ( !HasDocs(pDoc1) )
break;
bNeedDoc1Hits = true;
}
if ( !HasDocs(pDoc2) )
{
if ( HasDocs(pDoc1) )
m_pArg2->HintRowID ( pDoc1->m_tRowID );
pDoc2 = m_pArg2->GetDocsChunk();
if ( !HasDocs(pDoc2) )
break;
bNeedDoc2Hits = true;
}
// find next candidate match
while ( pDoc1->m_tRowID!=pDoc2->m_tRowID && HasDocs(pDoc1) && HasDocs(pDoc2) )
{
while ( pDoc1->m_tRowID < pDoc2->m_tRowID && HasDocs(pDoc2) )
pDoc1++;
while ( pDoc1->m_tRowID > pDoc2->m_tRowID && HasDocs(pDoc1) )
pDoc2++;
}
// got our candidate that matches AND?
RowID_t tRowID = pDoc1->m_tRowID;
if ( !HasDocs(pDoc1) || !HasDocs(pDoc2) )
continue;
// yes, now fetch more dots docs, if needed
// note how NULL is accepted here, "A and B but no dots" case is valid!
if ( !HasDocs(pDotDoc) )
{
m_pDot->HintRowID(tRowID);
pDotDoc = m_pDotDocs = m_pDot->GetDocsChunk();
m_bNeedDotHits = true;
}
// skip preceding docs
while ( pDotDoc && pDotDoc->m_tRowID < tRowID )
{
while ( pDotDoc->m_tRowID < tRowID )
pDotDoc++;
if ( !HasDocs(pDotDoc) )
{
pDotDoc = m_pDotDocs = m_pDot->GetDocsChunk();
m_bNeedDotHits = true;
}
}
// we will need document hits on both routes below
if ( bNeedDoc1Hits )
{
pHit1 = m_pArg1->GetHits(pDoc1);
bNeedDoc1Hits = false;
}
while ( pHit1->m_tRowID < tRowID )
pHit1++;
if ( bNeedDoc2Hits )
{
pHit2 = m_pArg2->GetHits(pDoc2);
bNeedDoc2Hits = false;
}
while ( pHit2->m_tRowID < tRowID )
pHit2++;
assert ( pHit1->m_tRowID==tRowID );
assert ( pHit2->m_tRowID==tRowID );
DWORD uSentenceEnd = 0;
if ( !pDotDoc || pDotDoc->m_tRowID!=tRowID )
{
// no dots in current document?
// just copy all hits until next document
uSentenceEnd = UINT_MAX;
} else
{
// got both hits and dots
// rewind to relevant dots hits, then do sentence boundary detection
if ( m_bNeedDotHits )
{
pDotHit = m_pDot->GetHits ( pDotDoc );
m_bNeedDotHits = false;
}
while ( pDotHit->m_tRowID < tRowID )
pDotHit++;
}
// do those hits
FilterHits ( pDoc1, pDoc2, pHit1, pHit2, pDotHit, uSentenceEnd, tRowID, iDoc );
// all hits copied; do the next candidate
pDoc1++;
pDoc2++;
}
m_pDoc1 = pDoc1;
m_pDoc2 = pDoc2;
m_pDotDoc = pDotDoc;
m_pHit1 = pHit1;
m_pHit2 = pHit2;
m_pDotHit = pDotHit;
return ReturnDocsChunk ( iDoc, "unit" );
}
template<bool ROWID_LIMITS>
void ExtUnit_T<ROWID_LIMITS>::CollectHits ( const ExtDoc_t * pDocs )
{
CopyMatchingHits ( m_dHits, pDocs );
PrintHitsChunk ( m_dHits.GetLength(), m_iAtomPos, m_dHits.Begin(), this );
}
//////////////////////////////////////////////////////////////////////////
ExtNotNear_c::ExtNotNear_c ( ExtNode_i * pMust, ExtNode_i * pNot, int iDist )
: ExtTwofer_c ( pMust, pNot )
, m_iDist ( iDist )
{
m_sNodeName.SetSprintf ( "NOTNEAR/%d", m_iDist );
// need hits from both nodes
pMust->SetCollectHits();
pNot->SetCollectHits();
}
void ExtNotNear_c::Reset ( const ISphQwordSetup & tSetup )
{
ExtTwofer_c::Reset ( tSetup );
BufferedNode_c::Reset();
m_pHitL = nullptr;
m_pHitR = nullptr;
}
void ExtNotNear_c::DebugDump ( int iLevel )
{
DebugDumpT ( "ExtNotNear_c", iLevel );
}
bool ExtNotNear_c::FilterHits ( RowID_t tRowID, const ExtHit_t * & pMust, const ExtHit_t * & pNot )
{
assert ( pMust && pNot && HasHits(pMust) && HasHits(pNot) );
const int iWasHits = m_dMyHits.GetLength();
bool bRightEmpty = false;
	// either hit stream may still carry tail hits after its doc stream is over
while ( pMust->m_tRowID==tRowID )
{
// get NOT next after current MUST
while ( pNot->m_tRowID==tRowID && HITMAN::GetPosWithField ( pNot->m_uHitpos ) < HITMAN::GetPosWithField ( pMust->m_uHitpos ) )
pNot++;
if ( !HasHits(pNot) )
break;
bRightEmpty = ( pNot->m_tRowID!=tRowID );
DWORD uPosMust = HITMAN::GetPosWithField ( pMust->m_uHitpos );
		// the field id lives in the topmost bits, so it is safe to add the distance straight to GetPosWithField() and compare without checking that both fields are equal
if ( bRightEmpty || uPosMust + pMust->m_uMatchlen - 1 + m_iDist<HITMAN::GetPosWithField ( pNot->m_uHitpos ) )
m_dMyHits.Add ( *pMust );
pMust++;
}
return ( iWasHits<m_dMyHits.GetLength() );
}
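// Worked example (illustration only): with NOTNEAR/3, a MUST hit at position
// 1 with m_uMatchlen=1 gives the threshold 1 + 1 - 1 + 3 = 4. A NOT hit at
// position 10 satisfies 4 < 10, so the MUST hit is kept; a NOT hit at
// position 4 fails the check, and the MUST hit is dropped as too near.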
const ExtDoc_t * ExtNotNear_c::GetDocsChunk()
{
const ExtDoc_t * pDocL = m_pDocL;
const ExtDoc_t * pDocR = m_pDocR;
const ExtHit_t * pHitL = m_pHitL;
const ExtHit_t * pHitR = m_pHitR;
int iDoc = 0;
bool bRightEmpty = false;
while ( iDoc<MAX_BLOCK_DOCS-1 )
{
if ( !WarmupDocs ( pDocL, pHitL, m_pLeft.get() ) )
break;
if ( !bRightEmpty )
{
if ( HasDocs(pDocL) )
m_pRight->HintRowID ( pDocL->m_tRowID );
bRightEmpty = !WarmupDocs ( pDocR, pHitR, m_pRight.get() );
}
RowID_t tNotRowID = ( bRightEmpty ? INVALID_ROWID : pDocR->m_tRowID );
		// copy non-matched docs from MUST
while ( pDocL->m_tRowID < tNotRowID && iDoc<MAX_BLOCK_DOCS-1 )
{
m_dDocs[iDoc++] = *pDocL;
while ( pHitL->m_tRowID<pDocL->m_tRowID )
pHitL++;
while ( pHitL->m_tRowID==pDocL->m_tRowID )
m_dMyHits.Add ( *pHitL++ );
pDocL++;
}
if ( !HasDocs(pDocL) || iDoc==MAX_BLOCK_DOCS-1 )
continue;
if ( bRightEmpty )
continue;
// skip NOT until min accepted id
while ( pDocR->m_tRowID<pDocL->m_tRowID ) pDocR++;
while ( pHitR->m_tRowID<pDocR->m_tRowID ) pHitR++;
if ( !HasHits(pHitR) || pDocL->m_tRowID!=pDocR->m_tRowID )
continue;
// filter must with not
assert ( HasDocs(pDocL) );
assert ( pDocL->m_tRowID==pDocR->m_tRowID );
assert ( pDocL->m_tRowID==pHitL->m_tRowID && pDocL->m_tRowID==pHitR->m_tRowID );
bool bMatched = FilterHits ( pDocL->m_tRowID, pHitL, pHitR );
bMatched |= pHitL->m_tRowID==pDocL->m_tRowID;
// copy MUST tail hits
while ( pHitL->m_tRowID==pDocL->m_tRowID )
m_dMyHits.Add ( *pHitL++ );
if ( bMatched )
m_dDocs[iDoc++] = *pDocL;
pDocL++;
pDocR++;
if ( HasDocs(pDocL) )
{
while ( pHitL->m_tRowID<pDocL->m_tRowID )
pHitL++;
}
if ( HasDocs(pDocR) )
{
while ( pHitR->m_tRowID<pDocR->m_tRowID )
pHitR++;
}
}
m_pDocL = pDocL;
m_pDocR = pDocR;
m_pHitL = pHitL;
m_pHitR = pHitR;
return ReturnDocsChunk ( iDoc, "notnear" );
}
void ExtNotNear_c::CollectHits ( const ExtDoc_t * pDocs )
{
CopyMatchingHits ( m_dHits, pDocs );
PrintHitsChunk ( m_dHits.GetLength(), m_iAtomPos, m_dHits.Begin(), this );
}
//////////////////////////////////////////////////////////////////////////
// INTRA-BATCH CACHING
//////////////////////////////////////////////////////////////////////////
/// container that does intra-batch query-sub-tree caching
/// actually carries the cached data, NOT to be recreated frequently (see thin wrapper below)
class NodeCacheContainer_c
{
friend class ExtNodeCached_c;
friend class CSphQueryNodeCache;
public:
void Release();
ExtNode_i * CreateCachedWrapper ( ExtNode_i* pChild, const XQNode_t * pRawChild, const ISphQwordSetup & tSetup );
private:
int m_iRefCount {1};
bool m_bStateOk {true};
const ISphQwordSetup * m_pSetup {nullptr};
CSphVector<ExtDoc_t> m_dDocs;
CSphVector<ExtHit_t> m_dHits;
int m_iAtomPos {0}; // minimal position from original donor, used for shifting
CSphQueryNodeCache * m_pNodeCache {nullptr};
bool WarmupCache ( ExtNode_i * pChild, int iQWords );
void Invalidate();
};
/// cached node wrapper to be injected into actual search trees
/// (special container actually carries all the data and does the work, see below)
class ExtNodeCached_c : public ExtNode_c
{
friend class NodeCacheContainer_c;
public:
~ExtNodeCached_c() override;
void Reset ( const ISphQwordSetup & tSetup ) override;
void HintRowID ( RowID_t tRowID ) override {}
const ExtDoc_t * GetDocsChunk() override;
void CollectHits ( const ExtDoc_t * pMatched ) override;
int GetQwords ( ExtQwordsHash_t & hQwords ) override;
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) override;
void GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const override;
bool GotHitless () override;
uint64_t GetWordID() const override;
void SetCollectHits() override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override { return { 0.0f, 0, 0 }; }
void SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries ) override;
private:
NodeCacheContainer_c * m_pNode;
const ExtDoc_t * m_pHitDoc; ///< points to entry in m_dDocs which GetHitsChunk() currently emits hits for
CSphString * m_pWarning;
int m_iHitIndex; ///< store the current position in m_Hits for GetHitsChunk()
int m_iDocIndex; ///< store the current position in m_Docs for GetDocsChunk()
	ExtNode_i * m_pChild; ///< pointer to the donor node, for AtomPos propagation
int m_iQwords; ///< number of tokens in parent query
// creation possible ONLY via NodeCacheContainer_c
ExtNodeCached_c ( NodeCacheContainer_c * pNode, ExtNode_i * pChild );
void StepForwardToHitsFor ( RowID_t tRowID );
bool RewindDocs ( const ExtDoc_t * & pDoc, const ExtDoc_t * & pMatched );
};
//////////////////////////////////////////////////////////////////////////
void NodeCacheContainer_c::Release()
{
if ( --m_iRefCount<=0 )
Invalidate();
}
ExtNode_i * NodeCacheContainer_c::CreateCachedWrapper ( ExtNode_i * pChild, const XQNode_t * pRawChild, const ISphQwordSetup & tSetup )
{
if ( !m_bStateOk )
return pChild;
// wow! we have a virgin!
if ( !m_dDocs.GetLength() )
{
m_iRefCount = pRawChild->GetCount();
m_pSetup = &tSetup;
}
return new ExtNodeCached_c ( this, pChild );
}
bool NodeCacheContainer_c::WarmupCache ( ExtNode_i * pChild, int iQwords )
{
assert ( pChild );
assert ( m_pSetup );
m_iAtomPos = pChild->GetAtomPos();
const ExtDoc_t * pChunk = pChild->GetDocsChunk();
while ( pChunk )
{
const ExtDoc_t * pChunkHits = pChunk;
		bool bHasDocs = false;
		for ( ; HasDocs(pChunk); pChunk++ )
		{
			m_dDocs.Add ( *pChunk );
			// pre-scale TFIDF by the query word count; ExtNodeCached_c::GetDocsChunk() divides it back out
			m_dDocs.Last().m_fTFIDF *= iQwords;
			m_pNodeCache->m_iMaxCachedDocs--;
			bHasDocs = true;
		}
		if ( bHasDocs )
{
const ExtHit_t * pHit = pChild->GetHits(pChunkHits);
while ( HasHits(pHit) )
{
m_dHits.Add ( *pHit++ );
m_pNodeCache->m_iMaxCachedHits--;
}
}
// too many values, stop caching
if ( m_pNodeCache->m_iMaxCachedDocs<0 || m_pNodeCache->m_iMaxCachedHits<0 )
{
Invalidate ();
pChild->Reset ( *m_pSetup );
m_pSetup = NULL;
return false;
}
pChunk = pChild->GetDocsChunk();
}
m_dDocs.Add().m_tRowID = INVALID_ROWID;
m_dHits.Add().m_tRowID = INVALID_ROWID;
pChild->Reset ( *m_pSetup );
m_pSetup = NULL;
return true;
}
void NodeCacheContainer_c::Invalidate()
{
m_pNodeCache->m_iMaxCachedDocs += m_dDocs.GetLength();
	m_pNodeCache->m_iMaxCachedHits += m_dHits.GetLength();
m_dDocs.Reset();
m_dHits.Reset();
m_bStateOk = false;
}
//////////////////////////////////////////////////////////////////////////
ExtNodeCached_c::~ExtNodeCached_c ()
{
SafeDelete ( m_pChild );
SafeRelease ( m_pNode );
}
void ExtNodeCached_c::Reset ( const ISphQwordSetup & tSetup )
{
if ( m_pChild )
m_pChild->Reset ( tSetup );
m_iHitIndex = 0;
m_iDocIndex = 0;
m_pHitDoc = NULL;
SetMaxTimeout ( tSetup.m_iMaxTimer );
m_pWarning = tSetup.m_pWarning;
}
int ExtNodeCached_c::GetQwords ( ExtQwordsHash_t & hQwords )
{
if ( !m_pChild )
return -1;
int iChildAtom = m_pChild->GetQwords ( hQwords );
if ( iChildAtom<0 )
return -1;
return m_iAtomPos + iChildAtom;
}
void ExtNodeCached_c::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
m_iQwords = hQwords.GetLength();
if ( m_pNode->m_pSetup && m_pChild )
{
m_pChild->SetQwordsIDF ( hQwords );
m_pNode->WarmupCache ( m_pChild, m_iQwords );
}
}
void ExtNodeCached_c::GetTerms ( const ExtQwordsHash_t & hQwords, CSphVector<TermPos_t> & dTermDupes ) const
{
if ( m_pChild )
m_pChild->GetTerms ( hQwords, dTermDupes );
}
bool ExtNodeCached_c::GotHitless ()
{
if ( m_pChild )
return m_pChild->GotHitless();
return false;
}
uint64_t ExtNodeCached_c::GetWordID() const
{
if ( m_pChild )
return m_pChild->GetWordID();
return 0;
}
void ExtNodeCached_c::SetCollectHits()
{
if ( m_pChild )
m_pChild->SetCollectHits();
}
void ExtNodeCached_c::SetRowidBoundaries ( const RowIdBoundaries_t & tBoundaries )
{
if ( m_pChild )
m_pChild->SetRowidBoundaries(tBoundaries);
}
ExtNodeCached_c::ExtNodeCached_c ( NodeCacheContainer_c * pNode, ExtNode_i * pChild )
: ExtNode_c (0)
, m_pNode ( pNode )
, m_pHitDoc ( NULL )
, m_pWarning ( NULL )
, m_iHitIndex ( 0 )
, m_iDocIndex ( 0 )
, m_pChild ( pChild )
, m_iQwords ( 0 )
{
m_iAtomPos = pChild->GetAtomPos();
}
void ExtNodeCached_c::StepForwardToHitsFor ( RowID_t tRowID )
{
assert ( m_pNode );
assert ( m_pNode->m_bStateOk );
CSphVector<ExtHit_t> & dHits = m_pNode->m_dHits;
int iEnd = dHits.GetLength()-1;
if ( m_iHitIndex>=iEnd )
return;
if ( dHits[m_iHitIndex].m_tRowID==tRowID )
return;
m_iHitIndex = sphBinarySearchFirst ( dHits.Begin(), m_iHitIndex, iEnd, bind ( &ExtHit_t::m_tRowID ),tRowID );
}
const ExtDoc_t * ExtNodeCached_c::GetDocsChunk()
{
if ( !m_pNode || !m_pChild )
return NULL;
if ( !m_pNode->m_bStateOk )
return m_pChild->GetDocsChunk();
if ( TimeExceeded() )
{
if ( m_pWarning )
*m_pWarning = "query time exceeded max_query_time";
return nullptr;
}
if ( sph::TimeExceeded ( m_iCheckTimePoint ) )
{
if ( session::GetKilled() )
{
if ( m_pWarning )
*m_pWarning = "query was killed";
return nullptr;
}
Threads::Coro::RescheduleAndKeepCrashQuery();
}
int iDoc = Min ( m_iDocIndex+MAX_BLOCK_DOCS-1, m_pNode->m_dDocs.GetLength()-1 ) - m_iDocIndex;
memcpy ( &m_dDocs[0], &m_pNode->m_dDocs[m_iDocIndex], sizeof(ExtDoc_t)*iDoc );
m_iDocIndex += iDoc;
	// funny trick based on the TFIDF formula: WarmupCache() pre-multiplied the cached values by the query word count, so divide it back out here
for ( int i=0; i<iDoc; i++ )
m_dDocs[i].m_fTFIDF /= m_iQwords;
return ReturnDocsChunk ( iDoc, "cached" );
}
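// Numeric illustration (an assumption based on the scaling in WarmupCache):
// a document cached with tf*idf = 0.25 by a 4-word query is stored as 1.0;
// a consumer with m_iQwords==4 divides that back to the original 0.25, so
// cached values stay consistent with the per-query TFIDF normalization.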
bool ExtNodeCached_c::RewindDocs ( const ExtDoc_t * & pDoc, const ExtDoc_t * & pMatched )
{
do
{
while ( pDoc->m_tRowID < pMatched->m_tRowID )
pDoc++;
if ( !HasDocs(pDoc) )
return false; // matched docs block is over for me, gimme another one
while ( pMatched->m_tRowID < pDoc->m_tRowID )
pMatched++;
if ( !HasDocs(pMatched) )
return false; // matched doc block did not yet begin for me, gimme another one
}
while ( pDoc->m_tRowID!=pMatched->m_tRowID );
// setup hitlist reader
StepForwardToHitsFor ( pDoc->m_tRowID );
return true;
}
void ExtNodeCached_c::CollectHits ( const ExtDoc_t * pMatched )
{
if ( !m_pNode || !m_pChild )
return;
if ( !m_pNode->m_bStateOk )
{
const ExtHit_t * pHit = m_pChild->GetHits(pMatched);
while ( HasHits(pHit) )
m_dHits.Add ( *pHit++ );
return;
}
if ( !pMatched )
return;
// aim to the right document
const ExtDoc_t * pDoc = m_pHitDoc;
m_pHitDoc = NULL;
if ( !pDoc )
{
// find match
pDoc = m_dDocs;
RewindDocs ( pDoc, pMatched );
}
// hit emission
while ( true )
{
// get next hit
ExtHit_t & tCachedHit = m_pNode->m_dHits[m_iHitIndex];
if ( !HasHits(&tCachedHit) )
break;
if ( tCachedHit.m_tRowID==pDoc->m_tRowID )
{
m_iHitIndex++;
ExtHit_t & tHit = m_dHits.Add();
tHit = tCachedHit;
tHit.m_uQuerypos = (WORD)( tCachedHit.m_uQuerypos + m_iAtomPos - m_pNode->m_iAtomPos );
}
else
{
// no more hits; get next acceptable document
pDoc++;
if ( !RewindDocs ( pDoc, pMatched ) )
{
pDoc = nullptr;
break;
}
assert ( pDoc->m_tRowID==pMatched->m_tRowID );
// setup hitlist reader
StepForwardToHitsFor ( pDoc->m_tRowID );
}
}
m_pHitDoc = pDoc;
}
//////////////////////////////////////////////////////////////////////////
CSphQueryNodeCache::CSphQueryNodeCache ( int iCells, int iMaxCachedDocs, int iMaxCachedHits )
{
if ( iCells>0 && iMaxCachedHits>0 && iMaxCachedDocs>0 )
{
m_pPool = new NodeCacheContainer_c [ iCells ];
for ( int i=0; i<iCells; i++ )
m_pPool[i].m_pNodeCache = this;
}
m_iMaxCachedDocs = iMaxCachedDocs / sizeof(ExtDoc_t);
m_iMaxCachedHits = iMaxCachedHits / sizeof(ExtHit_t);
}
CSphQueryNodeCache::~CSphQueryNodeCache ()
{
SafeDeleteArray ( m_pPool );
}
ExtNode_i * CSphQueryNodeCache::CreateProxy ( ExtNode_i * pChild, const XQNode_t * pRawChild, const ISphQwordSetup & tSetup )
{
// TEMPORARILY DISABLED
return pChild;
/* if ( m_iMaxCachedDocs<=0 || m_iMaxCachedHits<=0 )
return pChild;
assert ( pRawChild );
return m_pPool [ pRawChild->GetOrder() ].CreateCachedWrapper ( pChild, pRawChild, tSetup );*/
}
//////////////////////////////////////////////////////////////////////////
std::unique_ptr<ExtNode_i> CreatePseudoFTNode ( ExtNode_i * pNode, RowidIterator_i * pIterator )
{
return std::make_unique<ExtAndRightHits_c> ( new ExtIterator_c(pIterator), pNode );
}
/// Immediately interrupt current operation
void sphInterruptNow()
{
g_bInterruptNow = true;
}
bool sphInterrupted()
{
return g_bInterruptNow;
}
| 178,865 | C++ | .cpp | 5,197 | 31.670194 | 219 | 0.678598 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
16,861 | taskflushattrs.cpp | manticoresoftware_manticoresearch/src/taskflushattrs.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "taskflushattrs.h"
#include "searchdtask.h"
#include "searchdaemon.h"
#include "coroutine.h"
//////////////////////////////////////////////////////////////////////////
struct FlushState_t
{
Threads::Coro::Event_c m_tFlushFinished;
	std::atomic<int> m_iDemandEvents {0}; // how many waiters the worker must signal on completion
int m_iFlushTag = 0; ///< last flushed tag
};
static FlushState_t g_tFlush;
enum class Saved_e {
NOTHING, // no dirty indexes found, nothing saved
NOT_ALL, // not all indexes saved (some failed)
ALL, // whole save successfully completed
};
static Saved_e CheckSaveIndexes ()
{
CSphString sError;
auto eSaveState = Saved_e::ALL;
bool bDirty = false;
ServedSnap_t hLocals = g_pLocalIndexes->GetHash();
for ( auto& tIt : *hLocals )
{
assert ( tIt.second );
RIdx_c pIdx { tIt.second };
if ( pIdx->GetAttributeStatus() )
{
bDirty = true;
if ( !pIdx->SaveAttributes ( sError ) )
{
sphWarning ( "table %s: attrs save failed: %s", tIt.first.cstr(), sError.cstr() );
eSaveState = Saved_e::NOT_ALL;
}
}
}
if ( !bDirty )
return Saved_e::NOTHING;
++g_tFlush.m_iFlushTag;
return eSaveState;
}
/* About setting the event g_tFlush.m_tFlushFinished
 * Flushing attributes may be engaged in two ways: either by timer (scheduled), or by the 'FLUSH ATTRIBUTES' command.
 * The first needs nothing: it just does the work and re-schedules itself.
 * The second needs an event to detect the end of the operation.
 * So, there are 2 slightly different operations: 'just flush' and 'flush and signal'.
 * For this kind of task we may have at most 1 running job.
 * If you issue 'FLUSH ATTRIBUTES' while one is already in progress, the new job will be dropped.
 * Instead, we increase the counter of awaited events; when the task finishes, it checks how many events were demanded and fires exactly that many.
 */
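/* Example (illustration only): two clients issue 'FLUSH ATTRIBUTES' while a
 * flush is already running. Each CommandFlush() call bumps m_iDemandEvents;
 * when the single running job completes, it exchanges the counter for 0 and
 * fires SetEvent() twice, waking both waiters.
 */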
namespace {
void SaveIndexesMT ()
{
sphLogDebug ( "attrflush: doing the check" );
auto pDesc = PublishSystemInfo ( "SAVE tables" );
if ( CheckSaveIndexes ()==Saved_e::NOTHING )
sphLogDebug ( "attrflush: no dirty tables found" );
int iFireOnExit = g_tFlush.m_iDemandEvents.exchange ( 0 );
for ( int i=0; i<iFireOnExit; ++i )
g_tFlush.m_tFlushFinished.SetEvent ();
}
}
int CommandFlush () EXCLUDES ( MainThread )
{
	// force a check, and wait until it completes
sphLogDebug ( "attrflush: forcing check, tag=%d", g_tFlush.m_iFlushTag );
g_tFlush.m_iDemandEvents.fetch_add ( 1, std::memory_order_relaxed );
Threads::StartJob ( SaveIndexesMT );
g_tFlush.m_tFlushFinished.WaitEvent ();
sphLogDebug ( "attrflush: check finished, tag=%d", g_tFlush.m_iFlushTag );
return g_tFlush.m_iFlushTag;
}
static int64_t g_iAttrFlushPeriodUs = 0; // in useconds; 0 means "do not flush"
void SetAttrFlushPeriod ( int64_t iPeriod )
{
g_iAttrFlushPeriodUs = iPeriod;
}
void ScheduleFlushAttrs ()
{
if ( !g_iAttrFlushPeriodUs )
return;
static TaskID iScheduledSave = TaskManager::RegisterGlobal ( "Scheduled save tables", 1 );
static auto iLastCheckFinishedTime = sphMicroTimer ();
TaskManager::ScheduleJob ( iScheduledSave, iLastCheckFinishedTime + g_iAttrFlushPeriodUs, []
{
SaveIndexesMT();
iLastCheckFinishedTime = sphMicroTimer();
ScheduleFlushAttrs();
});
}
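// note: the scheduled lambda re-arms itself, so a single registered task
// yields a periodic flush; the next deadline counts from the end of the
// previous check rather than its start, so slow saves do not pile up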
// called from main shutdown and expects problem reporting
bool FinallySaveIndexes ()
{
return Threads::CallCoroutineRes ( [] { return CheckSaveIndexes ()!=Saved_e::NOT_ALL; });
}
| 3,860 | C++ | .cpp | 107 | 34.074766 | 126 | 0.7188 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,862 | taskflushbinlog.cpp | manticoresoftware_manticoresearch/src/taskflushbinlog.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
#include "taskflushbinlog.h"
#include "searchdtask.h"
#include "searchdaemon.h"
#include "binlog.h"
static void ScheduleFlushBinlogNext ()
{
static int iFlushBinlogTask = TaskManager::RegisterGlobal ( "Flush binlog", 1 );
if ( sphInterrupted () )
return;
TaskManager::ScheduleJob ( iFlushBinlogTask, Binlog::NextFlushTimestamp (), []
{
auto pDesc = PublishSystemInfo ( "FLUSH RT BINLOG" );
Binlog::Flush();
ScheduleFlushBinlogNext();
});
}
void StartRtBinlogFlushing ()
{
if ( !Binlog::IsFlushEnabled() )
return;
ScheduleFlushBinlogNext ();
}
| 1,038 | C++ | .cpp | 32 | 30.75 | 81 | 0.751249 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,863 | sortergroup.cpp | manticoresoftware_manticoresearch/src/sortergroup.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sortergroup.h"
#include "sortertraits.h"
#include "sorterprecalc.h"
#include "queuecreator.h"
#include "grouper.h"
#include "sphinxint.h"
#include "sortcomp.h"
#include "distinct.h"
/// group sorting functor
template < typename COMPGROUP >
struct GroupSorter_fn : public CSphMatchComparatorState, public MatchSortAccessor_t
{
const VecTraits_T<CSphMatch> & m_dBase;
explicit GroupSorter_fn ( const CSphMatchQueueTraits& dBase )
: m_dBase ( dBase.GetMatches() )
{}
FORCE_INLINE bool IsLess ( int a, int b ) const
{
return COMPGROUP::IsLess ( m_dBase[b], m_dBase[a], *this );
}
};
class SubGroupSorter_fn : public ISphNoncopyable
{
const VecTraits_T<CSphMatch> & m_dBase;
const CSphMatchComparatorState& m_tState;
const ISphMatchComparator * m_pComp;
public:
SubGroupSorter_fn ( const CSphMatchQueueTraits & dBase, const ISphMatchComparator * pC )
: m_dBase ( dBase.GetMatches () )
, m_tState ( dBase.GetState() )
, m_pComp ( pC )
{
assert ( m_pComp );
m_pComp->AddRef();
}
~SubGroupSorter_fn()
{
m_pComp->Release();
}
const ISphMatchComparator * GetComparator() const
{
return m_pComp;
}
bool MatchIsGreater ( const CSphMatch & a, const CSphMatch & b ) const
{
return m_pComp->VirtualIsLess ( b, a, m_tState );
}
	// inverse order, i.e. works as IsGreater
bool IsLess ( int a, int b ) const
{
return m_pComp->VirtualIsLess ( m_dBase[b], m_dBase[a], m_tState );
}
};
/// match sorter with k-buffering and group-by - common part
template<typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS>
class KBufferGroupSorter_T : public CSphMatchQueueTraits, protected BaseGroupSorter_c
{
using MYTYPE = KBufferGroupSorter_T<COMPGROUP,UNIQ,DISTINCT,NOTIFICATIONS>;
using BASE = CSphMatchQueueTraits;
public:
KBufferGroupSorter_T ( const ISphMatchComparator * pComp, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings )
: CSphMatchQueueTraits ( tSettings.m_iMaxMatches*GROUPBY_FACTOR )
, BaseGroupSorter_c ( tSettings )
, m_eGroupBy ( pQuery->m_eGroupFunc )
, m_iLimit ( tSettings.m_iMaxMatches )
, m_tGroupSorter (*this)
, m_tSubSorter ( *this, pComp )
{
assert ( GROUPBY_FACTOR>1 );
assert ( !DISTINCT || tSettings.m_pDistinctFetcher );
if constexpr ( NOTIFICATIONS )
m_dJustPopped.Reserve ( m_iSize );
m_pGrouper = tSettings.m_pGrouper;
m_pDistinctFetcher = tSettings.m_pDistinctFetcher;
m_tUniq.SetAccuracy ( tSettings.m_iDistinctAccuracy );
}
/// schema setup
void SetSchema ( ISphSchema * pSchema, bool bRemapCmp ) final
{
if ( m_pSchema )
{
FixupLocators ( m_pSchema, pSchema );
m_tGroupSorter.FixupLocators ( m_pSchema, pSchema, bRemapCmp );
m_tPregroup.ResetAttrs ();
m_dAggregates.Apply ( [] ( AggrFunc_i * pAggr ) { SafeDelete ( pAggr ); } );
m_dAggregates.Resize ( 0 );
m_dAvgs.Resize ( 0 );
}
BASE::SetSchema ( pSchema, bRemapCmp );
SetupBaseGrouper ( pSchema, DISTINCT, &m_dAvgs );
}
/// check if this sorter does groupby
bool IsGroupby () const final
{
return true;
}
/// set blob pool pointer (for string+groupby sorters)
void SetBlobPool ( const BYTE * pBlobPool ) final
{
BlobPool_c::SetBlobPool ( pBlobPool );
m_pGrouper->SetBlobPool ( pBlobPool );
if ( m_pDistinctFetcher )
m_pDistinctFetcher->SetBlobPool(pBlobPool);
}
void SetColumnar ( columnar::Columnar_i * pColumnar ) final
{
CSphMatchQueueTraits::SetColumnar(pColumnar);
BaseGroupSorter_c::SetColumnar(pColumnar);
m_pGrouper->SetColumnar(pColumnar);
if ( m_pDistinctFetcher )
m_pDistinctFetcher->SetColumnar(pColumnar);
}
/// get entries count
int GetLength () override
{
return Min ( Used(), m_iLimit );
}
/// set group comparator state
void SetGroupState ( const CSphMatchComparatorState & tState ) final
{
m_tGroupSorter.m_fnStrCmp = tState.m_fnStrCmp;
// FIXME! manual bitwise copying.. yuck
for ( int i=0; i<CSphMatchComparatorState::MAX_ATTRS; ++i )
{
m_tGroupSorter.m_eKeypart[i] = tState.m_eKeypart[i];
m_tGroupSorter.m_tLocator[i] = tState.m_tLocator[i];
}
m_tGroupSorter.m_uAttrDesc = tState.m_uAttrDesc;
m_tGroupSorter.m_iNow = tState.m_iNow;
// check whether we sort by distinct
if constexpr ( DISTINCT )
{
const CSphColumnInfo * pDistinct = m_pSchema->GetAttr("@distinct");
assert(pDistinct);
for ( const auto & tLocator : m_tGroupSorter.m_tLocator )
if ( tLocator==pDistinct->m_tLocator )
{
m_bSortByDistinct = true;
break;
}
}
}
bool CanBeCloned() const final { return !DISTINCT && BASE::CanBeCloned(); }
protected:
ESphGroupBy m_eGroupBy; ///< group-by function
int m_iLimit; ///< max matches to be retrieved
UNIQ m_tUniq;
bool m_bSortByDistinct = false;
GroupSorter_fn<COMPGROUP> m_tGroupSorter;
SubGroupSorter_fn m_tSubSorter;
CSphVector<AggrFunc_i *> m_dAvgs;
bool m_bAvgFinal = false;
CSphVector<SphAttr_t> m_dDistinctKeys;
	static const int GROUPBY_FACTOR = 4; ///< allocate this many times more storage when doing group-by (k, as in k-buffer)
/// finalize distinct counters
template <typename FIND>
void Distinct ( FIND&& fnFind )
{
m_tUniq.Sort ();
SphGroupKey_t uGroup;
for ( int iCount = m_tUniq.CountStart ( uGroup ); iCount; iCount = m_tUniq.CountNext ( uGroup ) )
{
CSphMatch * pMatch = fnFind ( uGroup );
if ( pMatch )
pMatch->SetAttr ( m_tLocDistinct, iCount );
}
}
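	// usage note: derived sorters invoke this with a lambda that resolves a
	// group key back to its head match (e.g. via their group-to-match hash),
	// so each surviving group's @distinct attr gets its unique-value count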
inline void SetupBaseGrouperWrp ( ISphSchema * pSchema, CSphVector<AggrFunc_i *> * pAvgs )
{
SetupBaseGrouper ( pSchema, DISTINCT, pAvgs );
}
void CloneKBufferGroupSorter ( MYTYPE* pClone ) const
{
// basic clone
BASE::CloneTo ( pClone );
// actions from SetGroupState
pClone->m_bSortByDistinct = m_bSortByDistinct;
pClone->m_tGroupSorter.m_fnStrCmp = m_tGroupSorter.m_fnStrCmp;
for ( int i = 0; i<CSphMatchComparatorState::MAX_ATTRS; i++ )
{
pClone->m_tGroupSorter.m_eKeypart[i] = m_tGroupSorter.m_eKeypart[i];
pClone->m_tGroupSorter.m_tLocator[i] = m_tGroupSorter.m_tLocator[i];
}
pClone->m_tGroupSorter.m_uAttrDesc = m_tGroupSorter.m_uAttrDesc;
pClone->m_tGroupSorter.m_iNow = m_tGroupSorter.m_iNow;
// complete SetSchema
pClone->m_dAvgs.Resize ( 0 );
pClone->SetupBaseGrouperWrp ( pClone->m_pSchema, &pClone->m_dAvgs );
		// m_pGrouper also needs to be cloned (otherwise SetBlobPool would cause races)
if ( m_pGrouper )
pClone->m_pGrouper = m_pGrouper->Clone ();
if ( m_pDistinctFetcher )
pClone->m_pDistinctFetcher = m_pDistinctFetcher->Clone ();
}
template<typename SORTER> SORTER * CloneSorterT () const
{
CSphQuery dFoo;
dFoo.m_iMaxMatches = m_iLimit;
dFoo.m_eGroupFunc = m_eGroupBy;
auto pClone = new SORTER ( m_tSubSorter.GetComparator (), &dFoo, *this );
CloneKBufferGroupSorter ( pClone );
return pClone;
}
CSphVector<AggrFunc_i *> GetAggregatesWithoutAvgs() const
{
CSphVector<AggrFunc_i *> dAggrs;
if ( m_dAggregates.GetLength ()!=m_dAvgs.GetLength ())
{
dAggrs = m_dAggregates;
for ( auto * pAvg : this->m_dAvgs )
dAggrs.RemoveValue ( pAvg );
}
return dAggrs;
}
FORCE_INLINE void FreeMatchPtrs ( int iMatch, bool bNotify=true )
{
if ( NOTIFICATIONS && bNotify )
m_dJustPopped.Add ( RowTagged_t ( m_dData[iMatch] ) );
m_pSchema->FreeDataPtrs ( m_dData[iMatch] );
		// on the final pass we totally wipe the match.
		// That is needed, since otherwise such 'garbage' matches with a non-null m_pDynamic
		// would be targeted in the d-tor by FreeDataPtrs, possibly with another(!) schema
if ( !bNotify )
m_dData[iMatch].ResetDynamic ();
}
template <bool GROUPED>
FORCE_INLINE void UpdateDistinct ( const CSphMatch & tEntry, const SphGroupKey_t uGroupKey )
{
int iCount = 1;
if constexpr ( GROUPED )
iCount = (int)tEntry.GetAttr ( m_tLocDistinct );
assert(m_pDistinctFetcher);
if constexpr ( DISTINCT==1 )
m_tUniq.Add ( {uGroupKey, m_pDistinctFetcher->GetKey(tEntry), iCount} );
else
{
m_pDistinctFetcher->GetKeys ( tEntry, this->m_dDistinctKeys );
for ( auto i : this->m_dDistinctKeys )
m_tUniq.Add ( {uGroupKey, i, iCount} );
}
}
void RemoveDistinct ( VecTraits_T<SphGroupKey_t>& dRemove )
{
// sort and compact
if ( !m_bSortByDistinct )
m_tUniq.Sort ();
m_tUniq.Compact ( dRemove );
}
};
/// match sorter with k-buffering and group-by
/// invoked by SELECT ... GROUP BY ... with only plain attributes (i.e. NO MVAs, NO JSONs)
template < typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS, bool HAS_AGGREGATES >
class CSphKBufferGroupSorter : public KBufferGroupSorter_T<COMPGROUP,UNIQ,DISTINCT,NOTIFICATIONS>
{
using MYTYPE = CSphKBufferGroupSorter<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>;
bool m_bMatchesFinalized = false;
int m_iMaxUsed = -1;
protected:
OpenHashTableFastClear_T <SphGroupKey_t, CSphMatch *> m_hGroup2Match;
// since we inherit from template, we need to write boring 'using' block
using KBufferGroupSorter = KBufferGroupSorter_T<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS>;
using KBufferGroupSorter::m_eGroupBy;
using KBufferGroupSorter::m_pGrouper;
using KBufferGroupSorter::m_iLimit;
using KBufferGroupSorter::m_tUniq;
using KBufferGroupSorter::m_bSortByDistinct;
using KBufferGroupSorter::m_tGroupSorter;
using KBufferGroupSorter::m_tSubSorter;
using KBufferGroupSorter::m_dAvgs;
using KBufferGroupSorter::GROUPBY_FACTOR;
using KBufferGroupSorter::GetAggregatesWithoutAvgs;
using KBufferGroupSorter::Distinct;
using KBufferGroupSorter::UpdateDistinct;
using KBufferGroupSorter::RemoveDistinct;
using KBufferGroupSorter::FreeMatchPtrs;
using KBufferGroupSorter::m_bAvgFinal;
using CSphGroupSorterSettings::m_tLocGroupby;
using CSphGroupSorterSettings::m_tLocCount;
using CSphGroupSorterSettings::m_tLocDistinct;
using BaseGroupSorter_c::EvalHAVING;
using BaseGroupSorter_c::AggrSetup;
using BaseGroupSorter_c::AggrUpdate;
using BaseGroupSorter_c::AggrUngroup;
using CSphMatchQueueTraits::m_iSize;
using CSphMatchQueueTraits::m_dData;
using CSphMatchQueueTraits::Get;
using CSphMatchQueueTraits::Add;
using CSphMatchQueueTraits::Used;
using CSphMatchQueueTraits::ResetAfterFlatten;
using CSphMatchQueueTraits::ResetDynamic;
using CSphMatchQueueTraits::ResetDynamicFreeData;
using MatchSorter_c::m_iTotal;
using MatchSorter_c::m_tJustPushed;
using MatchSorter_c::m_dJustPopped;
using MatchSorter_c::m_pSchema;
public:
/// ctor
CSphKBufferGroupSorter ( const ISphMatchComparator * pComp, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings )
: KBufferGroupSorter ( pComp, pQuery, tSettings )
, m_hGroup2Match ( tSettings.m_iMaxMatches*GROUPBY_FACTOR )
{}
bool Push ( const CSphMatch & tEntry ) override { return PushEx<false> ( tEntry, m_pGrouper->KeyFromMatch(tEntry), false, false, true, nullptr ); }
void Push ( const VecTraits_T<const CSphMatch> & dMatches ) override { assert ( 0 && "Not supported in grouping"); }
bool PushGrouped ( const CSphMatch & tEntry, bool ) override { return PushEx<true> ( tEntry, tEntry.GetAttr ( m_tLocGroupby ), false, false, true, nullptr ); }
ISphMatchSorter * Clone() const override { return this->template CloneSorterT<MYTYPE>(); }
/// store all entries into specified location in sorted order, and remove them from queue
int Flatten ( CSphMatch * pTo ) override
{
FinalizeMatches();
auto dAggrs = GetAggregatesWithoutAvgs();
const CSphMatch * pBegin = pTo;
for ( auto iMatch : this->m_dIData )
{
CSphMatch & tMatch = m_dData[iMatch];
if constexpr ( HAS_AGGREGATES )
dAggrs.Apply ( [&tMatch] ( AggrFunc_i * pAggr ) { pAggr->Finalize ( tMatch ); } );
if ( !EvalHAVING ( tMatch ))
{
FreeMatchPtrs ( iMatch, false );
continue;
}
Swap ( *pTo, tMatch );
++pTo;
}
m_iTotal = 0;
m_bMatchesFinalized = false;
if constexpr ( DISTINCT )
m_tUniq.Reset();
ResetAfterFlatten ();
m_iMaxUsed = ResetDynamic ( m_iMaxUsed );
return int ( pTo-pBegin );
}
void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) final
{
if ( !Used () )
return;
auto& dRhs = *(MYTYPE *) pRhs;
if ( dRhs.IsEmpty () )
{
CSphMatchQueueTraits::SwapMatchQueueTraits ( dRhs );
dRhs.m_hGroup2Match = std::move ( m_hGroup2Match );
dRhs.m_bMatchesFinalized = m_bMatchesFinalized;
dRhs.m_iMaxUsed = m_iMaxUsed;
dRhs.m_tUniq = std::move(m_tUniq);
m_iMaxUsed = -1;
return;
}
bool bUniqUpdated = false;
if ( !m_bMatchesFinalized && bCopyMeta )
{
			// cannot move m_tUniq into dRhs, as the move would invalidate m_tUniq and then break FinalizeMatches
m_tUniq.CopyTo ( dRhs.m_tUniq );
bUniqUpdated = true;
}
// if we're copying meta (uniq counters), we don't need distinct calculation right now
// we can do it later after all sorters are merged
FinalizeMatches ( !bCopyMeta );
// matches in dRhs are using a new (standalone) schema
// however, some supposedly unused matches still have old schema
// they were not cleared immediately for performance reasons
// we need to do that now
for ( int i = dRhs.m_dIData.GetLength(); i < dRhs.m_dData.GetLength(); i++ )
{
int iId = *(dRhs.m_dIData.Begin()+i);
dRhs.m_dData[iId].ResetDynamic();
}
dRhs.m_bUpdateDistinct = !bUniqUpdated;
dRhs.SetMerge(true);
// just push in heap order
// since we have grouped matches, it is not always possible to move them,
// so use plain push instead
for ( auto iMatch : this->m_dIData )
dRhs.PushGrouped ( m_dData[iMatch], false );
dRhs.m_bUpdateDistinct = true;
dRhs.SetMerge(false);
// once we're done copying, cleanup
m_iMaxUsed = ResetDynamicFreeData ( m_iMaxUsed );
}
void Finalize ( MatchProcessor_i & tProcessor, bool, bool bFinalizeMatches ) override
{
if ( !Used() )
return;
if ( bFinalizeMatches )
FinalizeMatches();
else if constexpr ( DISTINCT )
{
// if we are not finalizing matches, we are using global sorters
// let's try to remove dupes while we are processing data in separate threads
			// so that the main thread will have less data to work with
m_tUniq.Sort();
VecTraits_T<SphGroupKey_t> dStub;
m_tUniq.Compact(dStub);
			// need to clean up matches NOT from m_dIData with the current schema,
			// as after the schema change, data_ptr attributes will have garbage in the ptr part for matches not processed by tProcessor,
			// and global sorters have a different cleanup code path that does not handle this garbage the way usual sorters do
for ( int i = this->m_dIData.GetLength(); i < m_dData.GetLength(); i++ )
{
int iId = *(this->m_dIData.Begin()+i);
CSphMatch & tMatch = m_dData[iId];
m_pSchema->FreeDataPtrs(tMatch);
tMatch.ResetDynamic();
}
}
// just evaluate in heap order
for ( auto iMatch : this->m_dIData )
tProcessor.Process ( &m_dData[iMatch] );
}
void SetMerge ( bool bMerge ) override { m_bMerge = bMerge; }
protected:
template <bool GROUPED>
bool PushIntoExistingGroup( CSphMatch & tGroup, const CSphMatch & tEntry, SphGroupKey_t uGroupKey, SphAttr_t * pAttr )
{
assert ( tGroup.GetAttr ( m_tLocGroupby )==uGroupKey );
assert ( tGroup.m_pDynamic[-1]==tEntry.m_pDynamic[-1] );
auto & tLocCount = m_tLocCount;
if constexpr ( GROUPED )
tGroup.AddCounterAttr ( tLocCount, tEntry );
else
tGroup.AddCounterScalar ( tLocCount, 1 );
if constexpr ( HAS_AGGREGATES )
AggrUpdate ( tGroup, tEntry, GROUPED, m_bMerge );
// if new entry is more relevant, update from it
if ( m_tSubSorter.MatchIsGreater ( tEntry, tGroup ) )
{
if constexpr ( NOTIFICATIONS )
{
m_tJustPushed = RowTagged_t ( tEntry );
this->m_dJustPopped.Add ( RowTagged_t ( tGroup ) );
}
// clone the low part of the match
this->m_tPregroup.CloneKeepingAggrs ( tGroup, tEntry );
if ( pAttr )
UpdateGroupbyStr ( tGroup, pAttr );
}
// submit actual distinct value
if constexpr ( DISTINCT )
if ( m_bUpdateDistinct )
KBufferGroupSorter::template UpdateDistinct<GROUPED> ( tEntry, uGroupKey );
return false; // since it is a dupe
}
/// add entry to the queue
template <bool GROUPED>
FORCE_INLINE bool PushEx ( const CSphMatch & tEntry, const SphGroupKey_t uGroupKey, [[maybe_unused]] bool bNewSet, [[maybe_unused]] bool bTailFinalized, bool bClearNotify, SphAttr_t * pAttr )
{
if constexpr ( NOTIFICATIONS )
{
if ( bClearNotify )
{
m_tJustPushed = RowTagged_t();
this->m_dJustPopped.Resize ( 0 );
}
}
auto & tLocCount = m_tLocCount;
m_bMatchesFinalized = false;
if ( HAS_AGGREGATES && m_bAvgFinal )
CalcAvg ( Avg_e::UNGROUP );
// if this group is already hashed, we only need to update the corresponding match
CSphMatch ** ppMatch = m_hGroup2Match.Find ( uGroupKey );
if ( ppMatch )
{
CSphMatch * pMatch = (*ppMatch);
assert ( pMatch );
assert ( pMatch->GetAttr ( m_tLocGroupby )==uGroupKey );
return PushIntoExistingGroup<GROUPED> ( *pMatch, tEntry, uGroupKey, pAttr );
}
// if we're full, let's cut off some worst groups
if ( Used ()==m_iSize )
CutWorst ( m_iLimit*(int) ( GROUPBY_FACTOR/2 ) );
// submit actual distinct value
if constexpr ( DISTINCT )
if ( m_bUpdateDistinct )
KBufferGroupSorter::template UpdateDistinct<GROUPED> ( tEntry, uGroupKey );
// do add
assert ( Used()<m_iSize );
CSphMatch & tNew = Add();
m_pSchema->CloneMatch ( tNew, tEntry );
if constexpr ( HAS_AGGREGATES )
AggrSetup ( tNew, tEntry, m_bMerge );
if constexpr ( NOTIFICATIONS )
m_tJustPushed = RowTagged_t ( tNew );
if constexpr ( GROUPED )
{
if constexpr ( HAS_AGGREGATES )
AggrUngroup(tNew);
}
else
{
tNew.SetAttr ( m_tLocGroupby, uGroupKey );
tNew.SetAttr ( tLocCount, 1 );
if constexpr ( DISTINCT )
if ( m_bUpdateDistinct )
tNew.SetAttr ( m_tLocDistinct, 0 );
if ( pAttr )
UpdateGroupbyStr ( tNew, pAttr );
}
m_hGroup2Match.Add ( uGroupKey, &tNew );
++m_iTotal;
return true;
}
private:
enum class Avg_e { FINALIZE, UNGROUP };
bool m_bUpdateDistinct = true;
bool m_bMerge = false;
CSphVector<SphGroupKey_t> m_dRemove;
void CalcAvg ( Avg_e eGroup )
{
if ( m_dAvgs.IsEmpty() )
return;
m_bAvgFinal = ( eGroup==Avg_e::FINALIZE );
if ( eGroup==Avg_e::FINALIZE )
for ( auto i : this->m_dIData )
m_dAvgs.Apply( [this,i] ( AggrFunc_i * pAvg ) { pAvg->Finalize ( m_dData[i] ); } );
else
for ( auto i : this->m_dIData )
m_dAvgs.Apply ( [this,i] ( AggrFunc_i * pAvg ) { pAvg->Ungroup ( m_dData[i] ); } );
}
/// finalize counted distinct values
void CountDistinct ()
{
Distinct ( [this] ( SphGroupKey_t uGroup )->CSphMatch *
{
auto ppMatch = m_hGroup2Match.Find ( uGroup );
return ppMatch ? *ppMatch : nullptr;
});
}
// make final order before finalize/flatten call
void FinalizeMatches ( bool bCountDistinct=true )
{
if ( m_bMatchesFinalized )
return;
m_bMatchesFinalized = true;
if ( Used() > m_iLimit )
CutWorst ( m_iLimit, true );
else
{
if constexpr ( DISTINCT )
if ( bCountDistinct )
CountDistinct();
CalcAvg ( Avg_e::FINALIZE );
SortGroups();
}
}
void RebuildHash ()
{
for ( auto iMatch : this->m_dIData ) {
auto & tMatch = m_dData[iMatch];
m_hGroup2Match.Add ( tMatch.GetAttr ( m_tLocGroupby ), &tMatch );
}
}
/// cut worst N groups off the buffer tail, and maybe sort the best part
void CutWorst ( int iBound, bool bFinalize=false )
{
		// prepare to partition: finalize distinct and avg counters so that sorting sees final values
if constexpr ( DISTINCT )
if ( m_bSortByDistinct )
CountDistinct ();
CalcAvg ( Avg_e::FINALIZE );
// relocate best matches to the low part (up to the iBound)
BinaryPartition (iBound);
// take worst matches and free them (distinct stuff, data ptrs)
auto dWorst = this->m_dIData.Slice ( iBound );
if constexpr ( DISTINCT )
{
m_dRemove.Resize(0);
for ( auto iMatch : dWorst )
m_dRemove.Add ( m_dData[iMatch].GetAttr ( m_tLocGroupby ));
RemoveDistinct ( m_dRemove );
}
dWorst.Apply ( [this,bFinalize] ( int iMatch ) { FreeMatchPtrs ( iMatch, !bFinalize ); } );
m_iMaxUsed = Max ( m_iMaxUsed, this->m_dIData.GetLength() ); // memorize it for free dynamics later.
this->m_dIData.Resize ( iBound );
m_hGroup2Match.Clear();
if ( bFinalize )
{
SortGroups();
if constexpr ( DISTINCT )
				if ( !m_bSortByDistinct ) // since they weren't counted above
{
RebuildHash(); // distinct uses m_hGroup2Match
CountDistinct();
}
} else
{
			// we've called CalcAvg ( Avg_e::FINALIZE ) before partitioning the groups;
			// now undo that calculation for the survivors (the thrown-away ones are gone anyway).
			// On a finalizing (sorting) cut we don't need to ungroup here
CalcAvg ( Avg_e::UNGROUP );
RebuildHash();
}
}
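	// Sizing example (illustration only): with max_matches=1000 the buffer is
	// sized for 4000 groups (GROUPBY_FACTOR=4). On overflow PushEx() calls
	// CutWorst(2000) to keep the best half, and FinalizeMatches() later calls
	// CutWorst(1000, true) to sort and trim down to the requested limit.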
/// sort groups buffer
void SortGroups ()
{
this->m_dIData.Sort ( m_tGroupSorter );
}
// update @groupbystr value, if available
void UpdateGroupbyStr ( CSphMatch& tMatch, const SphAttr_t * pAttr )
{
if ( this->m_tLocGroupbyStr.m_bDynamic )
tMatch.SetAttr ( this->m_tLocGroupbyStr, *pAttr );
}
	// lazily reorder matches so that the best ones occupy the first iBound positions
void BinaryPartition ( int iBound )
{
float COEFF = Max ( 1.0f, float(Used()) / iBound );
int iPivot = this->m_dIData[ int(iBound/COEFF) ];
--iBound;
int a=0;
int b=Used()-1;
while (true)
{
int i=a;
int j=b;
while (i<=j)
{
while (m_tGroupSorter.IsLess (this->m_dIData[i],iPivot)) ++i;
while (m_tGroupSorter.IsLess (iPivot, this->m_dIData[j])) --j;
if ( i<=j ) ::Swap( this->m_dIData[i++], this->m_dIData[j--]);
}
if ( iBound == j )
break;
if ( iBound < j)
b = j; // too many elems acquired; continue with left part
else
				a = i; // too few elems acquired; continue with right part
int iPivotIndex = int ( ( a * ( COEFF-1 )+b ) / COEFF );
iPivot = this->m_dIData[iPivotIndex];
}
}
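	// The loop above is a quickselect-style partition: it only guarantees that
	// the best iBound entries land (unordered) in front of the rest. A sketch
	// of the equivalent call via the standard library, assuming a plain index
	// vector and a strict weak ordering (names hypothetical):
	//
	//   std::nth_element ( dIData.begin(), dIData.begin()+iBound, dIData.end(),
	//      [this] ( int a, int b ) { return m_tGroupSorter.IsLess ( a, b ); } );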
};
#define LOG_COMPONENT_NG __FILE__ << ":" << __LINE__ << " -"
#define LOG_LEVEL_DIAG false
#define DBG LOC(DIAG,NG)
/// match sorter with k-buffering and N-best group-by
/* Trick explanation
*
* Here we keep several grouped matches, but each one is not a single match, but a group.
* On the backend we have solid vector of real matches. They are allocated once and freed, and never moved around.
* To work with them, we have vector of indexes, so that each index points to corresponding match in the backend.
* So when performing moving operations (sort, etc.) we actually change indexes and never move matches themselves.
*
 * Say, when the user pushes matches with weights of 5,2,3,1,4,6, and we then sort them, we will have the following relations:
*
* m5 m2 m3 m1 m4 m6 // backend, placed in natural order as they come here
* 1 2 3 4 5 6 // original indexes, just points directly to backend matches.
*
 * After, say, sorting by ascending match weight, only the index vector is modified and becomes:
 *
 * 4 2 3 5 1 6 // reading match[i[k]] for k in 0..5 returns matches in ascending weight order.
*
* When grouping we collect several matches together and sort them.
 * Say, if one group contains matches m1, m2, m5, m6 and the second contains m4, m3, we have to keep 2 sets of matches in the hash:
*
* h1: m1 m2 m5 m6
* h2: m4 m3
*
* How to store that sequences?
*
* Well, we can do it directly, set by set, keeping heads in hash:
* m1 m2 m5 m6 m4 m3, heads 1, 5
*
 * Switching to indirection indexes, we have the sequence
* 4 2 1 6 5 3, hash 1, 4
*
 * That looks ok, but since the sets can change dynamically, it is hard to insert more matches into an existing group.
* That is like insertion into the middle of vector.
*
 * Let's try to make a list (chain) instead. We don't care about in-group ordering, just keep things chained.
 * To make things easier, make the list a ring (connect the tail back to the head), and store the position of one of the elems in the hash
 * (since it is a ring, it is not important which one exactly; we just need something to glue to).
*
* m5 -> 1 heads 1
* m2 -> 2, 1 heads 2
* m3 -> 2, 1, 3, heads 2, 3
* m1 -> 2, 4, 3, 1, heads 4, 3
* m4 -> 2, 4, 5, 1, 3, heads 4, 5
* m6 -> 2, 4, 5, 6, 3, 1 heads 6, 5
*
 * On insert, we store the old head's link into the new elem, and the new elem into the place of the old head's link.
 * One remaining issue is the indirect reference by position, i.e. we assume that the index at position 6 points to the match at position 6.
 * However, notice that since it is a ring, the elem to the left of the 6-th points to it directly by the number 6.
 * So we can just shift the heads back by one position, and the indirection assumption is no longer necessary.
 * The final sequence will be this one:
* m5 m2 m3 m1 m4 m6 - matches in their natural order
* 2, 4, 5, 6, 3, 1 - indirection vec. 4, 3. - heads of groups.
*
 * Iteration: take the 1-st group, with head 4:
 * 6->1->2->4*. Each number is both the index of the link and the index of the backend match. So, the matches here are:
 * m6 m5 m2 m1, and we can resort them as necessary (indirectly). Voila!
 *
 * On deletion an item goes to the freelist.
 * Allocating an elem is a separate task; it is served by linear allocation at first, and by the freelist once the storage is filled.
*
*/
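/* A minimal standalone sketch of the ring-chain bookkeeping described above,
 * with plain ints standing in for matches; dRing[i] holds the index of the
 * next element in the same group's ring. All names are hypothetical and the
 * function exists for illustration only.
 */
[[maybe_unused]] static void RingChainDemo_Sketch()
{
	CSphVector<int> dRing;
	dRing.Resize ( 6 );
	for ( int i=0; i<6; ++i )
		dRing[i] = i; // each fresh elem starts as a self-looped ring of one

	// on insert, the new elem takes over the old head's link,
	// and the old head is pointed at the new elem
	auto fnLink = [&dRing] ( int & iHead, int iNew )
	{
		if ( iHead<0 )
			iHead = iNew;
		else
		{
			dRing[iNew] = dRing[iHead];
			dRing[iHead] = iNew;
		}
	};

	int iHead1 = -1;
	int iHead2 = -1;
	const int dGroup1[] = { 0, 1, 3, 5 };
	const int dGroup2[] = { 2, 4 };
	for ( int i : dGroup1 ) fnLink ( iHead1, i );
	for ( int i : dGroup2 ) fnLink ( iHead2, i );

	// iterate group 1 the way the sorter does: the head match first, then
	// walk the ring until we come back around to the head
	for ( int i = dRing[iHead1]; i!=iHead1; i = dRing[i] )
		(void)i; // the real sorter touches m_dData[i] here
}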
template < typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS, bool HAS_AGGREGATES >
class CSphKBufferNGroupSorter : public KBufferGroupSorter_T<COMPGROUP,UNIQ,DISTINCT,NOTIFICATIONS>
{
using MYTYPE = CSphKBufferNGroupSorter<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS,HAS_AGGREGATES>;
protected:
using KBufferGroupSorter = KBufferGroupSorter_T<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS>;
using KBufferGroupSorter::m_eGroupBy;
using KBufferGroupSorter::m_pGrouper;
using KBufferGroupSorter::m_iLimit;
using KBufferGroupSorter::m_tUniq;
using KBufferGroupSorter::m_bSortByDistinct;
using KBufferGroupSorter::m_tGroupSorter;
using KBufferGroupSorter::m_tSubSorter;
using KBufferGroupSorter::m_dAvgs;
using KBufferGroupSorter::GROUPBY_FACTOR;
using KBufferGroupSorter::GetAggregatesWithoutAvgs;
using KBufferGroupSorter::Distinct;
using KBufferGroupSorter::FreeMatchPtrs;
using KBufferGroupSorter::UpdateDistinct;
using KBufferGroupSorter::RemoveDistinct;
using KBufferGroupSorter::m_bAvgFinal;
using CSphGroupSorterSettings::m_tLocGroupby;
using CSphGroupSorterSettings::m_tLocCount;
using CSphGroupSorterSettings::m_tLocDistinct;
// using CSphGroupSorterSettings::m_tLocGroupbyStr; // check! unimplemented?
using BaseGroupSorter_c::EvalHAVING;
using BaseGroupSorter_c::AggrUpdate;
using BaseGroupSorter_c::AggrUngroup;
using CSphMatchQueueTraits::m_iSize;
using CSphMatchQueueTraits::m_dData;
using MatchSorter_c::m_iTotal;
using MatchSorter_c::m_tJustPushed;
using MatchSorter_c::m_pSchema;
public:
/// ctor
CSphKBufferNGroupSorter ( const ISphMatchComparator * pComp, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings ) // FIXME! make k configurable
: KBufferGroupSorter ( pComp, pQuery, tSettings )
, m_hGroup2Index ( tSettings.m_iMaxMatches*GROUPBY_FACTOR )
, m_iGLimit ( Min ( pQuery->m_iGroupbyLimit, m_iLimit ) )
{
#ifndef NDEBUG
DBG << "Created iruns = " << m_iruns << " ipushed = " << m_ipushed;
#endif
this->m_dIData.Resize ( m_iSize ); // m_iLimit * GROUPBY_FACTOR
}
inline void SetGLimit ( int iGLimit ) { m_iGLimit = Min ( iGLimit, m_iLimit ); }
int GetLength() override { return Min ( m_iUsed, m_iLimit ); }
bool Push ( const CSphMatch & tEntry ) override { return PushEx<false> ( tEntry, m_pGrouper->KeyFromMatch(tEntry), false, false, true, nullptr ); }
void Push ( const VecTraits_T<const CSphMatch> & dMatches ) final { assert ( 0 && "Not supported in grouping"); }
bool PushGrouped ( const CSphMatch & tEntry, bool bNewSet ) override { return PushEx<true> ( tEntry, tEntry.GetAttr ( m_tLocGroupby ), bNewSet, false, true, nullptr ); }
/// store all entries into specified location in sorted order, and remove them from queue
int Flatten ( CSphMatch * pTo ) override
{
if ( !GetLength() )
return 0;
if ( !m_bFinalized )
{
FinalizeChains ();
PrepareForExport ();
CountDistinct ();
}
auto fnSwap = [&pTo] ( CSphMatch & tSrc ) { // the writer
Swap ( *pTo, tSrc );
++pTo;
};
const CSphMatch * pBegin = pTo;
for ( auto iHead : m_dFinalizedHeads )
{
CSphMatch & tGroupHead = m_dData[iHead];
if ( !EvalHAVING ( tGroupHead ))
{
DeleteChain ( iHead, false );
continue;
}
fnSwap ( tGroupHead ); // move top group match
for ( int i=this->m_dIData[iHead]; i!=iHead; i = this->m_dIData[i] )
fnSwap ( m_dData[i] ); // move tail matches
}
// final clean up before possible next pass
m_uLastGroupKey = -1;
m_iFree = 0;
m_iUsed = 0;
m_bFinalized = false;
m_iStorageSolidFrom = 0;
m_iTotal = 0;
m_dFinalizedHeads.Reset ();
m_hGroup2Index.Clear();
if constexpr ( DISTINCT )
m_tUniq.Reset();
return int ( pTo-pBegin );
}
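// Output layout sketch (illustrative): after PrepareForExport() each stored head points at the
// best match of its group, so for two finalized groups of two matches each the target buffer
// receives: g1.best, g1.second, g2.best, g2.second - i.e. each group's chain in sorted order.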
void Finalize ( MatchProcessor_i & tProcessor, bool, bool bFinalizeMatches ) override
{
if ( !GetLength() )
return;
if ( bFinalizeMatches )
{
if ( !m_bFinalized )
{
FinalizeChains();
PrepareForExport();
CountDistinct();
}
ProcessData ( tProcessor, m_dFinalizedHeads );
}
else
{
ProcessData ( tProcessor, GetAllHeads() );
if constexpr ( DISTINCT )
{
// if we are not finalizing matches, we are using global sorters
// let's try to remove dupes while we are processing data in separate threads
// so that the main thread will have less data to work with
m_tUniq.Sort();
VecTraits_T<SphGroupKey_t> dStub;
m_tUniq.Compact(dStub);
}
}
}
// TODO! TEST!
ISphMatchSorter * Clone () const override
{
auto* pClone = this->template CloneSorterT<MYTYPE>();
pClone->SetGLimit (m_iGLimit);
return pClone;
}
void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) final
{
#ifndef NDEBUG
DBG << " MoveTo " << pRhs << " iRuns:iPushed - " << m_iruns << " " << m_ipushed;
#endif
auto& dRhs = *(MYTYPE *) pRhs;
if ( !dRhs.m_iTotal )
{
DBG << " Rhs is empty, adopt! ";
CSphMatchQueueTraits::SwapMatchQueueTraits ( dRhs );
dRhs.m_hGroup2Index = std::move ( m_hGroup2Index );
::Swap ( m_uLastGroupKey, dRhs.m_uLastGroupKey );
::Swap ( m_iFree, dRhs.m_iFree );
::Swap ( m_iUsed, dRhs.m_iUsed );
::Swap ( m_bFinalized, dRhs.m_bFinalized );
m_dFinalizedHeads.SwapData ( dRhs.m_dFinalizedHeads );
::Swap ( m_iStorageSolidFrom, dRhs.m_iStorageSolidFrom );
#ifndef NDEBUG
::Swap ( m_iruns, dRhs.m_iruns );
::Swap ( m_ipushed, dRhs.m_ipushed );
LOC_SWAP(dRhs);
#endif
if ( !m_bFinalized && bCopyMeta )
dRhs.m_tUniq = std::move(m_tUniq);
return;
}
bool bUniqUpdated = false;
if ( !m_bFinalized && bCopyMeta )
{
m_tUniq.CopyTo ( dRhs.m_tUniq );
bUniqUpdated = true;
}
if ( !m_bFinalized )
{
FinalizeChains();
// PrepareForExport(); // for moving we don't need fully finalized matches; just cleaned ones are enough
CountDistinct();
}
dRhs.m_bUpdateDistinct = !bUniqUpdated;
dRhs.SetMerge(true);
auto iTotal = dRhs.m_iTotal;
for ( auto iHead : m_dFinalizedHeads )
{
auto uGroupKey = m_dData[iHead].GetAttr ( m_tLocGroupby );
// have to set bNewSet to true,
// as we need to fall through at PushAlreadyHashed and update the count and aggregate values for the head match
// even if a match with this uGroupKey already exists
dRhs.template PushEx<true> ( m_dData[iHead], uGroupKey, true, true, true, nullptr );
for ( int i = this->m_dIData[iHead]; i!=iHead; i = this->m_dIData[i] )
dRhs.template PushEx<false> ( m_dData[i], uGroupKey, false, true, true, nullptr );
DeleteChain ( iHead, false );
}
dRhs.m_bUpdateDistinct = true;
dRhs.SetMerge(false);
dRhs.m_iTotal = m_iTotal+iTotal;
}
void SetMerge ( bool bMerge ) override { m_bMerge = bMerge; }
protected:
int m_iStorageSolidFrom = 0; // the edge from which the storage is not yet touched and needs no freelist chaining
OpenHashTable_T<SphGroupKey_t, int> m_hGroup2Index; // used to quickly locate group for incoming match
int m_iGLimit; ///< limit per one group
SphGroupKey_t m_uLastGroupKey = -1; ///< helps PushEx determine whether a new subgroup has started
int m_iFree = 0; ///< current insertion point
int m_iUsed = 0;
// final cached data valid when everything is finalized
bool m_bFinalized = false; // helper to avoid double work
CSphVector<int> m_dFinalizedHeads; ///< sorted finalized heads
int m_iLastGroupCutoff; ///< cutoff edge of the last group to fit the limit
#ifndef NDEBUG
int m_iruns = 0; ///< helpers for conditional breakpoints on debug
int m_ipushed = 0;
#endif
LOC_ADD;
/*
* Every match goes to its own subset according to uGroupKey.
* The head match of each group is stored in the hash to quickly locate it on subsequent pushes.
* It holds all the calculated stuff (aggregates/group_concat) until finalization.
*/
template <bool GROUPED>
bool PushEx ( const CSphMatch & tEntry, const SphGroupKey_t uGroupKey, bool bNewSet, bool bTailFinalized, bool bClearNotify, [[maybe_unused]] SphAttr_t * pAttr )
{
#ifndef NDEBUG
++m_ipushed;
DBG << "PushEx: tag" << tEntry.m_iTag << ",g" << uGroupKey << ": pushed" << m_ipushed
<< " g" << GROUPED << " n" << bNewSet;
#endif
if constexpr ( NOTIFICATIONS )
{
if ( bClearNotify )
{
m_tJustPushed = RowTagged_t();
this->m_dJustPopped.Resize ( 0 );
}
}
this->m_bFinalized = false;
if ( HAS_AGGREGATES && m_bAvgFinal )
CalcAvg ( Avg_e::UNGROUP );
// place elem into the set
auto iNew = AllocateMatch ();
CSphMatch & tNew = m_dData[iNew];
// if such group already hashed
int * pGroupIdx = m_hGroup2Index.Find ( uGroupKey );
if ( pGroupIdx )
return PushAlreadyHashed<GROUPED> ( pGroupIdx, iNew, tEntry, uGroupKey, bNewSet, bTailFinalized );
// the match came from MoveTo of another sorter, it is a tail, and it has no group here (maybe it was already
// deleted during finalization as one of the worst). Just discard the whole group in that case.
if ( bTailFinalized && !GROUPED )
{
DeallocateMatch ( iNew );
return false;
}
m_pSchema->CloneMatch ( tNew, tEntry ); // fixme! check if essential data cloned
// else
// this->m_tPregroup.CloneWithoutAggrs ( tNew, tEntry );
// this->m_tPregroup.CopyAggrs ( tNew, tEntry );
// submit actual distinct value in all cases
if constexpr ( DISTINCT )
if ( m_bUpdateDistinct )
KBufferGroupSorter::template UpdateDistinct<GROUPED> ( tNew, uGroupKey );
if constexpr ( NOTIFICATIONS )
m_tJustPushed = RowTagged_t ( tNew );
this->m_dIData[iNew] = iNew; // new head - points to self (0-ring)
Verify ( m_hGroup2Index.Add ( uGroupKey, iNew ));
++m_iTotal;
if constexpr ( GROUPED )
{
m_uLastGroupKey = uGroupKey;
if constexpr ( HAS_AGGREGATES )
AggrUngroup ( m_dData[iNew] );
} else
{
tNew.SetAttr ( m_tLocGroupby, uGroupKey );
tNew.SetAttr ( m_tLocCount, 1 );
if constexpr ( DISTINCT )
tNew.SetAttr ( m_tLocDistinct, 0 );
}
return true;
}
private:
bool m_bUpdateDistinct = true;
bool m_bMerge = false;
// always secure a slot for a match (do vacuum-cleaning if there is no free slot)
inline int AllocateMatch ()
{
auto iPlace = TryAllocateMatch ();
if ( iPlace<0 )
{
VacuumClean ();
iPlace = TryAllocateMatch ();
}
assert ( iPlace>=0 && iPlace<m_iSize );
DBG << "allocated: " << iPlace;
return iPlace;
}
// free the match's dataptrs and return its slot to the freelist
FORCE_INLINE void FreeMatch ( int iElem, bool bNotify ) // fixme! intersects with parent by name
{
FreeMatchPtrs ( iElem, bNotify );
DeallocateMatch ( iElem );
}
inline int TryAllocateMatch ()
{
if ( m_iUsed==m_iSize )
return -1; // no more room
++m_iUsed;
auto iElem = m_iFree;
if ( iElem<m_iStorageSolidFrom )
m_iFree = this->m_dIData[iElem];
else {
++m_iFree;
m_iStorageSolidFrom = m_iFree;
}
return iElem;
}
inline void DeallocateMatch (int iElem)
{
--m_iUsed;
this->m_dIData[iElem] = m_iFree; // put to chain
m_iFree = iElem;
assert ( m_iFree >=0 );
}
// return the length of the matches chain (it is a ring, terminated by meeting the head again)
int ChainLen ( int iPos ) const
{
int iChainLen = 1;
for ( int i = this->m_dIData[iPos]; i!=iPos; i = this->m_dIData[i] )
++iChainLen;
return iChainLen;
}
// add a new match into the chain; aggregates are relaxed and not managed until finalization
/*
 * the chain of matches is actually a ring of integers. Each one points to the corresponding
 * match in the storage and, simultaneously, to the next member of the ring.
 * We can iterate over the chain starting from the head until the same index is met again.
*/
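/* Worked splice example (illustrative numbers): given the ring 4 -> 6 -> 1 -> 2 -> 4 with head 4,
 * AddToChain ( iNew=7, ..., iHead=4 ) links the newcomer right after the head:
 * m_dIData[7] = m_dIData[4] (==6); m_dIData[4] = 7; => ring 4 -> 7 -> 6 -> 1 -> 2 -> 4 */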
void AddToChain ( int iNew, const CSphMatch & tEntry, int iHead )
{
CSphMatch & tNew = m_dData[iNew];
this->m_tPregroup.CloneWithoutAggrs ( tNew, tEntry );
if constexpr ( NOTIFICATIONS )
m_tJustPushed = RowTagged_t ( tNew );
// put after the head
auto iPrevChain = this->m_dIData[iHead];
this->m_dIData[iNew] = iPrevChain;
this->m_dIData[iHead] = iNew;
}
// add entry to existing group
/*
* If the group is not full and the new match is less than the head, it will replace the head;
* the calculated stuff will be moved to and adopted by this new replacement.
* If the group is full and the new match is less than the head, it will be rejected early.
* In all other cases the new match will be inserted into the group right after the head.
*/
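/* Decision table (illustrative summary of the branches below, where the head is the current
 * worst match of the group):
 *   new > head                  -> chain it after the head (any excess is trimmed later on vacuum)
 *   new <= head, chain full     -> reject early
 *   new <= head, chain not full -> chain it after the head, then it becomes the new head */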
template <bool GROUPED>
bool PushAlreadyHashed ( int * pHead, int iNew, const CSphMatch & tEntry, const SphGroupKey_t uGroupKey, bool bNewSet, bool bTailFinalized )
{
int & iHead = *pHead;
assert ( m_dData[iHead].GetAttr ( m_tLocGroupby )==uGroupKey );
assert ( m_dData[iHead].m_pDynamic[-1]==tEntry.m_pDynamic[-1] );
DBG << "existing " << m_dData[iHead].m_iTag << "," << uGroupKey
<< " m_pDynamic: " << m_dData[iHead].m_pDynamic;
// check if we need to push the match at all
if ( m_tSubSorter.MatchIsGreater ( tEntry, m_dData[iHead] ) )
AddToChain ( iNew, tEntry, iHead ); // always add; bad ones will be filtered out later during vacuuming
else if ( ChainLen ( iHead )>=m_iGLimit ) // less than worst, drop it
DeallocateMatch ( iNew );
else
{
AddToChain ( iNew, tEntry, iHead );
this->m_tPregroup.MoveAggrs ( m_dData[iNew], m_dData[iHead] );
*pHead = iNew;
}
auto & tHeadMatch = m_dData[iHead];
// submit actual distinct value in all cases
if constexpr ( DISTINCT )
if ( m_bUpdateDistinct )
KBufferGroupSorter::template UpdateDistinct<GROUPED> ( tEntry, uGroupKey );
// update group-wide counters
auto & tLocCount = m_tLocCount;
if constexpr ( GROUPED )
{
// it's already grouped match
// sum grouped matches count
if ( bNewSet || uGroupKey!=m_uLastGroupKey )
{
tHeadMatch.AddCounterAttr ( tLocCount, tEntry );
m_uLastGroupKey = uGroupKey;
bNewSet = true;
}
} else if ( !bTailFinalized )
{
// it's a simple match
// increase grouped matches count
tHeadMatch.AddCounterScalar ( tLocCount, 1 );
bNewSet = true;
}
// update aggregates
if constexpr ( HAS_AGGREGATES )
{
if ( bNewSet )
AggrUpdate ( tHeadMatch, tEntry, GROUPED, m_bMerge );
}
// since it is dupe (i.e. such group is already pushed) - return false;
return false;
}
enum class Avg_e { FINALIZE, UNGROUP };
void CalcAvg ( Avg_e eGroup )
{
if ( this->m_dAvgs.IsEmpty() )
return;
m_bAvgFinal = ( eGroup==Avg_e::FINALIZE );
int64_t i = 0;
if ( eGroup==Avg_e::FINALIZE )
for ( auto tData = m_hGroup2Index.Iterate(i); tData.second; tData = m_hGroup2Index.Iterate(i) )
m_dAvgs.Apply ( [this, &tData] ( AggrFunc_i * pAvg ) {
pAvg->Finalize ( m_dData[*tData.second] );
});
else
for ( auto tData = m_hGroup2Index.Iterate(i); tData.second; tData = m_hGroup2Index.Iterate(i) )
m_dAvgs.Apply ( [this, &tData] ( AggrFunc_i * pAvg ) {
pAvg->Ungroup ( m_dData[*tData.second] );
});
}
void BinaryPartitionTail ( VecTraits_T<int>& dData, int iBound )
{
--iBound;
int iPivot = dData[iBound];
int a = 0;
int b = dData.GetLength ()-1;
while (true) {
int i = a;
int j = b;
while (i<=j) {
while ( m_tSubSorter.IsLess ( dData[i], iPivot )) ++i;
while ( m_tSubSorter.IsLess ( iPivot, dData[j] )) --j;
if ( i<=j ) ::Swap ( dData[i++], dData[j--] );
}
if ( iBound==j )
break;
if ( iBound<j )
b = j; // too many elems acquired; continue with left part
else
a = i; // too few elems acquired; continue with right part
iPivot = dData[( a+b ) / 2];
}
}
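// Note (illustrative summary): the above is a quickselect-style partition; it repeatedly
// partitions around a pivot until exactly iBound best elements (by m_tSubSorter) occupy
// dData[0..iBound-1]. Unlike a full sort, this is O(n) on average rather than O(n log n).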
CSphVector<int> GetAllHeads()
{
CSphVector<int> dAllHeads;
dAllHeads.Reserve ( m_hGroup2Index.GetLength ());
int64_t i = 0;
for ( auto tData = m_hGroup2Index.Iterate(i); tData.second; tData = m_hGroup2Index.Iterate(i) )
dAllHeads.Add ( *tData.second );
return dAllHeads;
}
// free place for new matches
void VacuumClean()
{
auto iLimit = m_iLimit * GROUPBY_FACTOR / 2;
// first try to cut out too long tails
int iSize = 0;
int64_t i = 0;
for ( auto tData = m_hGroup2Index.Iterate(i); tData.second; tData = m_hGroup2Index.Iterate(i) )
iSize += VacuumTail ( tData.second, m_iGLimit );
// if we reached the limit now - bail, no need to free more.
if ( iSize<=iLimit )
return;
// if we're here, just vacuuming the tails wasn't effective enough and some deeper cleaning is necessary
SortThenVacuumWorstHeads ( iLimit );
}
// final pass before iface finalize/flatten - cut worst, sort everything
void FinalizeChains()
{
if ( m_bFinalized )
return;
m_bFinalized = true;
int64_t i = 0;
for ( auto tData = m_hGroup2Index.Iterate(i); tData.second; tData = m_hGroup2Index.Iterate(i) )
VacuumTail ( tData.second, m_iGLimit, Stage_e::FINAL );
// Continue by cutting out whole groups
SortThenVacuumWorstHeads ( m_iLimit, Stage_e::FINAL ); // FINAL stage: the chains are already sorted
// also, matches freed from the chains were cleared with FreeDataPtrs, but *now* we also need to reset their dynamics;
// otherwise FreeDataPtrs on non-zero dynamics would be called again in the d-tr, probably with another schema, and crash
// FIXME!!! need to keep and restore all members changed by TryAllocateMatch - it'd be better to rewrite code to pass state into TryAllocateMatch or use common code
auto iFree = m_iFree;
auto iUsed = m_iUsed;
auto iSSFrom = m_iStorageSolidFrom;
for ( auto iElem = TryAllocateMatch (); iElem>=0; iElem = TryAllocateMatch () )
m_dData[iElem].ResetDynamic ();
m_iFree = iFree;
m_iUsed = iUsed;
m_iStorageSolidFrom = iSSFrom;
}
/*
* Here we
* 1) Cut off the very last head if it would exceed the limit.
* 2) Copy all the calculated stuff (aggr attributes) from the head match to every other match of its group.
* 3) Sort each group in decreasing order, and then shift the ring ahead by one match.
* That is necessary since the head is the worst match, and the one right after it is the best (as just sorted).
* Since it is a ring, by moving ahead we will have the best match 1st and the worst last.
*/
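/* Shift example (illustrative): after the FINAL-stage sort the ring is
 * worst(head) -> best -> 2nd -> ... -> worst, so replacing each stored head with
 * m_dIData[iHead] makes iteration start from the best match:
 * the ring 4 -> 6 -> 1 -> 2 -> 4 (head 4, the worst) is exported as 6, 1, 2, 4. */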
void PrepareForExport()
{
VacuumTail ( &m_dFinalizedHeads.Last(), m_iLastGroupCutoff, Stage_e::FINAL );
auto dAggrs = GetAggregatesWithoutAvgs ();
for ( auto& iHead : m_dFinalizedHeads )
{
for ( auto * pAggr : dAggrs )
pAggr->Finalize ( m_dData[iHead] );
PropagateAggregates ( iHead );
iHead = this->m_dIData[iHead]; // shift
}
}
void PropagateAggregates ( int iHead )
{
for ( auto i = this->m_dIData[iHead]; i!=iHead; i = this->m_dIData[i] )
this->m_tPregroup.CopyAggrs ( m_dData[i], m_dData[iHead] );
}
// at the collect stage we don't need to strictly sort matches inside groups,
// but we do need to track pushed/deleted matches.
// at the finalize stage, in contrast, no tracking is needed, but matches must be sorted.
enum class Stage_e { COLLECT, FINAL };
// sorts by next-to-worst element in the chain
struct FinalGroupSorter_t
{
const GroupSorter_fn<COMPGROUP> & m_tGroupSorter;
const CSphTightVector<int> & m_dIData;
FinalGroupSorter_t ( const GroupSorter_fn<COMPGROUP> & tSorter, const CSphTightVector<int> & dIData )
: m_tGroupSorter ( tSorter )
, m_dIData ( dIData )
{}
bool IsLess ( int a, int b ) const
{
return m_tGroupSorter.IsLess ( m_dIData[a], m_dIData[b] );
}
};
// full clean - sort the groups, then iterate over them until iLimit elems are counted; cut out the rest.
// if the last group does not fit into the remainder of iLimit, it is still kept whole; no fractioning is performed over it.
// calculates the desired length of the last chain needed to make the limit hard ( 1..m_iGLimit ) and stores it in m_iLastGroupCutoff
void SortThenVacuumWorstHeads ( int iSoftLimit, Stage_e eStage = Stage_e::COLLECT )
{
m_dFinalizedHeads = GetAllHeads();
CalcAvg ( Avg_e::FINALIZE );
// in this final sort we need to keep the heads, but sort by the next-to-head element (which is the best in the group)
FinalGroupSorter_t tFinalSorter ( m_tGroupSorter, this->m_dIData );
m_dFinalizedHeads.Sort ( tFinalSorter );
int iRetainMatches = 0;
CSphVector<SphGroupKey_t> dRemovedHeads; // to remove distinct
// delete worst heads
ARRAY_FOREACH ( i, m_dFinalizedHeads )
if ( iSoftLimit > iRetainMatches )
iRetainMatches += ChainLen ( m_dFinalizedHeads[i] );
else
{
// the quota is exceeded; the rest are just cut out entirely
auto iRemoved = DeleteChain ( m_dFinalizedHeads[i], eStage==Stage_e::COLLECT );
if constexpr ( DISTINCT )
dRemovedHeads.Add( iRemoved );
m_dFinalizedHeads.RemoveFast ( i-- );
}
// discard removed distinct
if constexpr ( DISTINCT )
RemoveDistinct ( dRemovedHeads );
if ( eStage==Stage_e::COLLECT )
CalcAvg ( Avg_e::UNGROUP );
m_iLastGroupCutoff = m_iGLimit+iSoftLimit-iRetainMatches;
}
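// Cutoff math example (illustrative numbers): iSoftLimit=10, m_iGLimit=3, five full chains of 3.
// The loop above retains 4 chains (iRetainMatches grows 3, 6, 9, 12) and deletes the 5th;
// m_iLastGroupCutoff = 3 + 10 - 12 = 1, so PrepareForExport() later trims the last retained
// chain to a single match, making the total exactly 10.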
// for a given chain, throw out the worst elems so that it fits into the iLimit quota.
// returns the resulting length of the chain
int VacuumTail ( int* pHead, int iLimit, Stage_e eStage = Stage_e::COLLECT )
{
assert ( iLimit>0 );
CSphVector<int> dChain;
dChain.Add ( *pHead );
for ( auto i = this->m_dIData[*pHead]; i!=*pHead; i = this->m_dIData[i] )
dChain.Add ( i );
if ( dChain.GetLength()==1 )
return 1; // fast over
auto dWorstTail = dChain.Slice ( iLimit );
// if no sort necessary and limit not exceeded - nothing to do
if ( eStage==Stage_e::COLLECT && dWorstTail.IsEmpty() )
return dChain.GetLength();
// the chain needs to be shortened
if ( !dWorstTail.IsEmpty() )
{
BinaryPartitionTail ( dChain, iLimit );
dChain.Resize ( iLimit );
}
// sort if necessary and ensure last elem of chain is the worst one
if ( eStage==Stage_e::FINAL )
{
dChain.Sort( m_tSubSorter ); // sorted in reverse order, so the worst match here is the last one.
iLimit = dChain.GetLength();
} else
{
assert ( dChain.GetLength ()==iLimit );
// not sorted, need to find worst match for new head
int iWorst = 0;
for (int i=1; i<iLimit; ++i)
{
if ( m_tSubSorter.IsLess ( dChain[iWorst], dChain[i] ) )
iWorst = i;
}
::Swap ( dChain[iWorst], dChain[iLimit-1] );
}
auto iNewHead = dChain.Last ();
// move calculated aggregates to the new head
if ( iNewHead!=*pHead )
{
SphGroupKey_t uGroupKey = m_dData[*pHead].GetAttr ( m_tLocGroupby );
int * pHeadInHash = m_hGroup2Index.Find(uGroupKey);
assert(pHeadInHash);
this->m_tPregroup.MoveAggrs ( m_dData[iNewHead], m_dData[*pHead] );
*pHead = iNewHead;
*pHeadInHash = iNewHead;
}
// now we can safely free worst matches
for ( auto iWorst : dWorstTail )
FreeMatch ( iWorst, eStage==Stage_e::COLLECT );
// recreate the chain. It is actually a ring, and the external hash points to the minimal (worst) elem
this->m_dIData[iNewHead] = dChain[0]; // head points to begin of chain
for ( int i = 0; i<iLimit-1; ++i ) // each elem points to the next, last again to head
this->m_dIData[dChain[i]] = dChain[i+1];
return iLimit;
}
// delete whole chain (and remove from hash also).
SphGroupKey_t DeleteChain ( int iPos, bool bNotify )
{
SphGroupKey_t uGroupKey = m_dData[iPos].GetAttr ( m_tLocGroupby );
m_hGroup2Index.Delete ( uGroupKey );
int iNext = this->m_dIData[iPos];
FreeMatch ( iPos, bNotify );
for ( auto i = iNext; i!=iPos; i = iNext )
{
iNext = this->m_dIData[i];
FreeMatch ( i, bNotify );
}
return uGroupKey;
}
/// count distinct values if necessary
void CountDistinct ()
{
if constexpr ( DISTINCT )
Distinct ( [this] ( SphGroupKey_t uGroup )->CSphMatch *
{
auto pIdx = m_hGroup2Index.Find ( uGroup );
return pIdx? &m_dData[*pIdx] : nullptr;
});
}
void ProcessData ( MatchProcessor_i & tProcessor, const IntVec_t & dHeads )
{
for ( auto iHead : dHeads )
{
tProcessor.Process ( &m_dData[iHead] ); // process top group match
for ( int i = this->m_dIData[iHead]; i!=iHead; i = this->m_dIData[i] )
tProcessor.Process ( &m_dData[i] ); // process tail matches
}
}
};
/////////////////////////////////////////////////////////////////////
/// generic match sorter that understands groupers that return multiple keys per match
template < typename T >
class MultiValueGroupSorterTraits_T : public T
{
using BASE = T;
public:
MultiValueGroupSorterTraits_T ( const ISphMatchComparator * pComp, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings )
: T ( pComp, pQuery, tSettings )
{}
bool Push ( const CSphMatch & tMatch ) override
{
this->m_pGrouper->MultipleKeysFromMatch ( tMatch, m_dKeys );
bool bRes = false;
ARRAY_FOREACH ( i, m_dKeys )
{
SphGroupKey_t tKey = m_dKeys[i];
// need to clear notifications once per match - not for every pushed value
bRes |= BASE::template PushEx<false> ( tMatch, tKey, false, false, ( i==0 ), nullptr );
}
return bRes;
}
bool PushGrouped ( const CSphMatch & tEntry, bool bNewSet ) override
{
return BASE::template PushEx<true> ( tEntry, tEntry.GetAttr ( BASE::m_tLocGroupby ), bNewSet, false, true, nullptr );
}
private:
CSphVector<SphGroupKey_t> m_dKeys;
};
template < typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS, bool HAS_AGGREGATES >
class MultiValueGroupSorter_T : public MultiValueGroupSorterTraits_T <CSphKBufferGroupSorter <COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>>
{
using BASE = MultiValueGroupSorterTraits_T <CSphKBufferGroupSorter < COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>>;
using MYTYPE = MultiValueGroupSorter_T < COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES >;
public:
using BASE::BASE;
ISphMatchSorter * Clone () const final { return this->template CloneSorterT<MYTYPE>(); }
};
template < typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS, bool HAS_AGGREGATES >
class MultiValueNGroupSorter_T : public MultiValueGroupSorterTraits_T < CSphKBufferNGroupSorter<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>>
{
using BASE = MultiValueGroupSorterTraits_T <CSphKBufferNGroupSorter < COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>>;
using MYTYPE = MultiValueNGroupSorter_T <COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>;
public:
using BASE::BASE;
ISphMatchSorter * Clone () const final
{
auto * pClone = this->template CloneSorterT<MYTYPE>();
pClone->SetGLimit (this->m_iGLimit);
return pClone;
}
};
/////////////////////////////////////////////////////////////////////
/// match sorter with k-buffering and group-by for JSON arrays
template < typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS, bool HAS_AGGREGATES >
class CSphKBufferJsonGroupSorter : public CSphKBufferGroupSorter < COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES >
{
public:
using BASE = CSphKBufferGroupSorter<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>;
using MYTYPE = CSphKBufferJsonGroupSorter<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>;
// since we inherit from a template, we need to write a boring 'using' block
using KBufferGroupSorter = KBufferGroupSorter_T<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS>;
using KBufferGroupSorter::m_eGroupBy;
using KBufferGroupSorter::m_iLimit;
using KBufferGroupSorter::m_tSubSorter;
/// ctor
FWD_BASECTOR( CSphKBufferJsonGroupSorter )
bool Push ( const CSphMatch & tEntry ) final { return PushMatch(tEntry); }
void Push ( const VecTraits_T<const CSphMatch> & dMatches ) final { assert ( 0 && "Not supported in grouping"); }
/// add pre-grouped entry to the queue
bool PushGrouped ( const CSphMatch & tEntry, bool bNewSet ) override
{
// re-group it based on the group key
return BASE::template PushEx<true> ( tEntry, tEntry.GetAttr ( BASE::m_tLocGroupby ), bNewSet, false, true, nullptr );
}
ISphMatchSorter * Clone () const final
{
return this->template CloneSorterT<MYTYPE>();
}
private:
FORCE_INLINE bool PushMatch ( const CSphMatch & tMatch )
{
SphGroupKey_t uGroupKey = this->m_pGrouper->KeyFromMatch ( tMatch );
const BYTE * pBlobPool = this->m_pGrouper->GetBlobPool();
bool bClearNotify = true;
return PushJsonField ( uGroupKey, pBlobPool, [this, &tMatch, &bClearNotify]( SphAttr_t * pAttr, SphGroupKey_t uMatchGroupKey )
{
bool bPushed = BASE::template PushEx<false> ( tMatch, uMatchGroupKey, false, false, bClearNotify, pAttr );
bClearNotify = false; // need to clear notifications once per match - not for every pushed value
return bPushed;
}
);
}
};
/// implicit group-by sorter
/// invoked when there is no 'group-by', but count(*) or count(distinct attr) is in play
template < typename COMPGROUP, typename UNIQ, int DISTINCT, bool NOTIFICATIONS, bool HAS_AGGREGATES>
class CSphImplicitGroupSorter final : public MatchSorter_c, ISphNoncopyable, protected BaseGroupSorter_c
{
using MYTYPE = CSphImplicitGroupSorter<COMPGROUP, UNIQ, DISTINCT, NOTIFICATIONS, HAS_AGGREGATES>;
using BASE = MatchSorter_c;
public:
CSphImplicitGroupSorter ( const ISphMatchComparator * DEBUGARG(pComp), const CSphQuery *, const CSphGroupSorterSettings & tSettings )
: BaseGroupSorter_c ( tSettings )
{
assert ( !DISTINCT || tSettings.m_pDistinctFetcher );
assert ( !pComp );
if constexpr ( NOTIFICATIONS )
m_dJustPopped.Reserve(1);
m_iMatchCapacity = 1;
m_pDistinctFetcher = tSettings.m_pDistinctFetcher;
}
/// schema setup
void SetSchema ( ISphSchema * pSchema, bool bRemapCmp ) final
{
if ( m_pSchema )
{
FixupLocators ( m_pSchema, pSchema );
m_tPregroup.ResetAttrs ();
m_dAggregates.Apply ( [] ( AggrFunc_i * pAggr ) {SafeDelete ( pAggr ); } );
m_dAggregates.Resize ( 0 );
}
BASE::SetSchema ( pSchema, bRemapCmp );
SetupBaseGrouper ( pSchema, DISTINCT );
}
bool IsGroupby () const final { return true; }
void SetBlobPool ( const BYTE * pBlobPool ) final
{
BlobPool_c::SetBlobPool ( pBlobPool );
if ( m_pDistinctFetcher )
m_pDistinctFetcher->SetBlobPool(pBlobPool);
}
void SetColumnar ( columnar::Columnar_i * pColumnar ) final
{
BASE::SetColumnar(pColumnar);
BaseGroupSorter_c::SetColumnar(pColumnar);
if ( m_pDistinctFetcher )
m_pDistinctFetcher->SetColumnar(pColumnar);
}
bool IsCutoffDisabled() const final { return true; }
bool Push ( const CSphMatch & tEntry ) final { return PushEx<false>(tEntry); }
void Push ( const VecTraits_T<const CSphMatch> & dMatches ) final { assert ( 0 && "Not supported in grouping"); }
bool PushGrouped ( const CSphMatch & tEntry, bool ) final { return PushEx<true>(tEntry); }
/// store all entries into specified location in sorted order, and remove them from queue
int Flatten ( CSphMatch * pTo ) final
{
assert ( m_bDataInitialized );
CountDistinct ();
if constexpr ( HAS_AGGREGATES )
{
for ( auto * pAggregate : m_dAggregates )
pAggregate->Finalize ( m_tData );
}
int iCopied = 0;
if ( EvalHAVING ( m_tData ) )
{
iCopied = 1;
Swap ( *pTo, m_tData );
} else
{
m_pSchema->FreeDataPtrs ( m_tData );
m_tData.ResetDynamic ();
}
m_iTotal = 0;
m_bDataInitialized = false;
if constexpr ( DISTINCT )
m_tUniq.Reset();
return iCopied;
}
/// finalize, perform final sort/cut as needed
void Finalize ( MatchProcessor_i & tProcessor, bool, bool bFinalizeMatches ) final
{
if ( !GetLength() )
return;
tProcessor.Process ( &m_tData );
if ( !bFinalizeMatches )
m_tUniq.Compact();
}
int GetLength() final { return m_bDataInitialized ? 1 : 0; }
bool CanBeCloned() const final { return !DISTINCT && BASE::CanBeCloned(); }
// TODO! test.
ISphMatchSorter * Clone () const final
{
auto pClone = new MYTYPE ( nullptr, nullptr, *this );
CloneTo ( pClone );
pClone->SetupBaseGrouperWrp ( pClone->m_pSchema );
if ( m_pDistinctFetcher )
pClone->m_pDistinctFetcher = m_pDistinctFetcher->Clone();
return pClone;
}
void MoveTo ( ISphMatchSorter * pRhs, bool bCopyMeta ) final
{
if (!m_bDataInitialized)
return;
auto& dRhs = *(MYTYPE *) pRhs;
if ( !dRhs.m_bDataInitialized )
{
// ISphMatchSorter
::Swap ( m_iTotal, dRhs.m_iTotal );
::Swap ( m_tData, dRhs.m_tData );
::Swap ( m_bDataInitialized, dRhs.m_bDataInitialized );
if ( bCopyMeta )
dRhs.m_tUniq = std::move ( m_tUniq );
return;
}
if ( bCopyMeta )
m_tUniq.CopyTo ( dRhs.m_tUniq );
// the next step is a bit tricky:
// we can't just add the current uniq count to the final one; we need to append m_tUniq instead,
// so that the final flattening will calculate the real uniq count.
dRhs.AddCount ( m_tData );
if constexpr ( HAS_AGGREGATES )
dRhs.UpdateAggregates ( m_tData, false, true );
if constexpr ( DISTINCT )
if ( !bCopyMeta )
dRhs.UpdateDistinct ( m_tData );
}
void SetMerge ( bool bMerge ) override { m_bMerge = bMerge; }
protected:
CSphMatch m_tData;
bool m_bDataInitialized = false;
bool m_bMerge = false;
UNIQ m_tUniq;
private:
CSphVector<SphAttr_t> m_dDistinctKeys;
CSphRefcountedPtr<DistinctFetcher_i> m_pDistinctFetcher;
inline void SetupBaseGrouperWrp ( ISphSchema * pSchema ) { SetupBaseGrouper ( pSchema, DISTINCT ); }
void AddCount ( const CSphMatch & tEntry ) { m_tData.AddCounterAttr ( m_tLocCount, tEntry ); }
void UpdateAggregates ( const CSphMatch & tEntry, bool bGrouped = true, bool bMerge = false ) { AggrUpdate ( m_tData, tEntry, bGrouped, bMerge ); }
void SetupAggregates ( const CSphMatch & tEntry ) { AggrSetup ( m_tData, tEntry, m_bMerge ); }
// submit actual distinct value in all cases
template <bool GROUPED = true>
void UpdateDistinct ( const CSphMatch & tEntry )
{
int iCount = 1;
if constexpr ( GROUPED )
iCount = (int) tEntry.GetAttr ( m_tLocDistinct );
if constexpr ( DISTINCT==1 )
m_tUniq.Add ( { 0, m_pDistinctFetcher->GetKey(tEntry), iCount } );
else
{
m_pDistinctFetcher->GetKeys ( tEntry, m_dDistinctKeys );
for ( auto i : m_dDistinctKeys )
this->m_tUniq.Add ( { 0, i, iCount } );
}
}
/// add entry to the queue
template <bool GROUPED>
FORCE_INLINE bool PushEx ( const CSphMatch & tEntry )
{
if constexpr ( NOTIFICATIONS )
{
m_tJustPushed = RowTagged_t();
m_dJustPopped.Resize(0);
}
if ( m_bDataInitialized )
{
assert ( m_tData.m_pDynamic[-1]==tEntry.m_pDynamic[-1] );
if constexpr ( GROUPED )
{
// it's already grouped match
// sum grouped matches count
AddCount ( tEntry );
} else
{
// it's a simple match
// increase grouped matches count
m_tData.AddCounterScalar ( m_tLocCount, 1 );
}
// update aggregates
if constexpr ( HAS_AGGREGATES )
UpdateAggregates ( tEntry, GROUPED, m_bMerge );
}
if constexpr ( DISTINCT )
UpdateDistinct<GROUPED> ( tEntry );
// it's a dupe anyway, so we shouldn't update total matches count
if ( m_bDataInitialized )
return false;
// add first
m_pSchema->CloneMatch ( m_tData, tEntry );
// first-time aggregate setup
if constexpr ( HAS_AGGREGATES )
SetupAggregates(tEntry);
if constexpr ( NOTIFICATIONS )
m_tJustPushed = RowTagged_t ( m_tData );
if constexpr ( !GROUPED )
{
m_tData.SetAttr ( m_tLocGroupby, 1 ); // fake group number
m_tData.SetAttr ( m_tLocCount, 1 );
if constexpr ( DISTINCT )
m_tData.SetAttr ( m_tLocDistinct, 0 );
}
else
{
if constexpr ( HAS_AGGREGATES )
AggrUngroup ( m_tData );
}
m_bDataInitialized = true;
++m_iTotal;
return true;
}
/// count distinct values if necessary
void CountDistinct ()
{
if constexpr ( !DISTINCT )
return;
assert ( m_bDataInitialized );
m_tData.SetAttr ( m_tLocDistinct, m_tUniq.CountDistinct() );
}
};
///////////////////////////////////////////////////////////////////////////////
#define CREATE_SORTER_4TH(SORTER,COMPGROUP,UNIQ,COMP,QUERY,SETTINGS,HAS_PACKEDFACTORS,HAS_AGGREGATES) \
{ \
	int iMultiDistinct = 0; \
	if ( tSettings.m_bDistinct ) \
	{ \
		assert(tSettings.m_pDistinctFetcher); \
		iMultiDistinct = tSettings.m_pDistinctFetcher->IsMultiValue() ? 2 : 1; \
	} \
	BYTE uSelector = 4*iMultiDistinct + 2*(bHasPackedFactors?1:0) + (HAS_AGGREGATES?1:0); \
switch ( uSelector ) \
{ \
case 0: return new SORTER<COMPGROUP,UNIQ,0, false,false> ( pComp, pQuery, tSettings ); \
case 1: return new SORTER<COMPGROUP,UNIQ,0, false,true> ( pComp, pQuery, tSettings ); \
case 2: return new SORTER<COMPGROUP,UNIQ,0, true, false> ( pComp, pQuery, tSettings ); \
case 3: return new SORTER<COMPGROUP,UNIQ,0, true, true> ( pComp, pQuery, tSettings ); \
case 4: return new SORTER<COMPGROUP,UNIQ,1, false,false> ( pComp, pQuery, tSettings ); \
case 5: return new SORTER<COMPGROUP,UNIQ,1, false,true> ( pComp, pQuery, tSettings ); \
case 6: return new SORTER<COMPGROUP,UNIQ,1, true, false> ( pComp, pQuery, tSettings ); \
case 7: return new SORTER<COMPGROUP,UNIQ,1, true, true> ( pComp, pQuery, tSettings ); \
case 8: return new SORTER<COMPGROUP,UNIQ,2, false,false> ( pComp, pQuery, tSettings ); \
case 9: return new SORTER<COMPGROUP,UNIQ,2, false,true> ( pComp, pQuery, tSettings ); \
case 10:return new SORTER<COMPGROUP,UNIQ,2, true, false> ( pComp, pQuery, tSettings ); \
case 11:return new SORTER<COMPGROUP,UNIQ,2, true, true> ( pComp, pQuery, tSettings ); \
default: assert(0); return nullptr; \
} \
}
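// uSelector decoding for CREATE_SORTER_4TH (illustrative): bit 0 - aggregates, bit 1 - packed
// factors, bits 2.. - distinct mode (0 = none, 1 = single-value, 2 = multi-value). E.g. a query
// with a multi-value distinct fetcher and aggregates but no packed factors yields
// 4*2 + 2*0 + 1 = 9 and instantiates SORTER<COMPGROUP,UNIQ,2,false,true>.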
template < typename COMPGROUP >
static ISphMatchSorter * CreateGroupSorter ( const ISphMatchComparator * pComp, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings, bool bHasPackedFactors, bool bHasAggregates, const PrecalculatedSorterResults_t & tPrecalc )
{
ISphMatchSorter * pPrecalcSorter = CreatePrecalcSorter ( tPrecalc, tSettings );
if ( pPrecalcSorter )
return pPrecalcSorter;
bool bUseHLL = tSettings.m_iDistinctAccuracy > 0;
using Uniq_c = UniqGrouped_T<ValueWithGroup_t>;
using UniqSingle_c = UniqSingle_T<SphAttr_t>;
using UniqCount_c = UniqGrouped_T<ValueWithGroupCount_t>;
using UniqCountSingle_c = UniqSingle_T<ValueWithCount_t>;
BYTE uSelector3rd = 32*( bUseHLL ? 1 : 0 ) + 16*( tSettings.m_bGrouped ? 1:0 ) + 8*( tSettings.m_bJson ? 1:0 ) + 4*( pQuery->m_iGroupbyLimit>1 ? 1:0 ) + 2*( tSettings.m_bImplicit ? 1:0 ) + ( ( tSettings.m_pGrouper && tSettings.m_pGrouper->IsMultiValue() ) ? 1:0 );
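	// uSelector3rd bit layout (illustrative): 1 - multi-value grouper, 2 - implicit group-by,
	// 4 - groupby limit > 1, 8 - JSON group-by, 16 - pre-grouped input, 32 - HLL distinct counting.
	// E.g. a plain GROUP BY with m_iDistinctAccuracy > 0 gives uSelector3rd = 32 and selects
	// CSphKBufferGroupSorter with UniqHLL_c.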
switch ( uSelector3rd )
{
case 0: CREATE_SORTER_4TH ( CSphKBufferGroupSorter, COMPGROUP, Uniq_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 1: CREATE_SORTER_4TH ( MultiValueGroupSorter_T, COMPGROUP, Uniq_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 2: CREATE_SORTER_4TH ( CSphImplicitGroupSorter, COMPGROUP, UniqSingle_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 4: CREATE_SORTER_4TH ( CSphKBufferNGroupSorter, COMPGROUP, Uniq_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 5: CREATE_SORTER_4TH ( MultiValueNGroupSorter_T, COMPGROUP, Uniq_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 8: CREATE_SORTER_4TH ( CSphKBufferJsonGroupSorter, COMPGROUP, Uniq_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 16:CREATE_SORTER_4TH ( CSphKBufferGroupSorter, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 17:CREATE_SORTER_4TH ( MultiValueGroupSorter_T, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 18:CREATE_SORTER_4TH ( CSphImplicitGroupSorter, COMPGROUP, UniqCountSingle_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 20:CREATE_SORTER_4TH ( CSphKBufferNGroupSorter, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 21:CREATE_SORTER_4TH ( MultiValueNGroupSorter_T, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 24:CREATE_SORTER_4TH ( CSphKBufferJsonGroupSorter, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 32:CREATE_SORTER_4TH ( CSphKBufferGroupSorter, COMPGROUP, UniqHLL_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 33:CREATE_SORTER_4TH ( MultiValueGroupSorter_T, COMPGROUP, UniqHLL_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 34:CREATE_SORTER_4TH ( CSphImplicitGroupSorter, COMPGROUP, UniqHLLSingle_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 36:CREATE_SORTER_4TH ( CSphKBufferNGroupSorter, COMPGROUP, UniqHLL_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 37:CREATE_SORTER_4TH ( MultiValueNGroupSorter_T, COMPGROUP, UniqHLL_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 40:CREATE_SORTER_4TH ( CSphKBufferJsonGroupSorter, COMPGROUP, UniqHLL_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 48:CREATE_SORTER_4TH ( CSphKBufferGroupSorter, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 49:CREATE_SORTER_4TH ( MultiValueGroupSorter_T, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 50:CREATE_SORTER_4TH ( CSphImplicitGroupSorter, COMPGROUP, UniqCountSingle_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 52:CREATE_SORTER_4TH ( CSphKBufferNGroupSorter, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 53:CREATE_SORTER_4TH ( MultiValueNGroupSorter_T, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
case 56:CREATE_SORTER_4TH ( CSphKBufferJsonGroupSorter, COMPGROUP, UniqCount_c, pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates );
default: assert(0); return nullptr;
}
}
ISphMatchSorter * CreateGroupSorter ( ESphSortFunc eGroupFunc, const ISphMatchComparator * pComp, const CSphQuery * pQuery, const CSphGroupSorterSettings & tSettings, bool bHasPackedFactors, bool bHasAggregates, const PrecalculatedSorterResults_t & tPrecalc )
{
switch ( eGroupFunc )
{
case FUNC_GENERIC1: return CreateGroupSorter<MatchGeneric1_fn> ( pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
case FUNC_GENERIC2: return CreateGroupSorter<MatchGeneric2_fn> ( pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
case FUNC_GENERIC3: return CreateGroupSorter<MatchGeneric3_fn> ( pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
case FUNC_GENERIC4: return CreateGroupSorter<MatchGeneric4_fn> ( pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
case FUNC_GENERIC5: return CreateGroupSorter<MatchGeneric5_fn> ( pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
case FUNC_EXPR: return CreateGroupSorter<MatchExpr_fn> ( pComp, pQuery, tSettings, bHasPackedFactors, bHasAggregates, tPrecalc );
default: return nullptr;
}
}
//////////////////////////////////////////////////////////////////////////
// FILE: manticoresoftware_manticoresearch/src/searchdexpr.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exprtraits.h"
#include "searchdexpr.h"
#include "sphinxexcerpt.h"
#include "sphinxutils.h"
#include "sphinxint.h"
#include "attribute.h"
#include "docstore.h"
enum HookType_e
{
HOOK_SNIPPET,
HOOK_HIGHLIGHT
};
static int StringBinary2Number ( const char * sStr, int iLen )
{
if ( !sStr || !iLen )
return 0;
char sBuf[64];
if ( (int)(sizeof ( sBuf )-1 )<iLen )
iLen = sizeof ( sBuf )-1;
memcpy ( sBuf, sStr, iLen );
sBuf[iLen] = '\0';
return atoi ( sBuf );
}
static bool ParseSnippetLimit ( const CSphString & sName, int iVal, SnippetLimits_t & tOpt )
{
if ( sName=="limit" ) tOpt.m_iLimit = iVal;
else if ( sName=="limit_passages" || sName=="limit_snippets" ) tOpt.m_iLimitPassages = iVal;
else if ( sName=="limit_words" ) tOpt.m_iLimitWords = iVal;
else
return false;
return true;
}
static bool ParseSnippetOption ( const CSphNamedVariant & tVariant, SnippetQuerySettings_t & tOpt, CSphString & sError )
{
CSphString sName = tVariant.m_sKey;
sName.ToLower();
const CSphString & sVal = tVariant.m_sValue;
int iVal = tVariant.m_iValue;
bool bVal = tVariant.m_iValue!=0;
if ( ParseSnippetLimit ( sName, iVal, tOpt ) )
return true;
const char * szBegins = "__";
if ( sName.Begins(szBegins) )
{
auto iStartLen = (int)strlen(szBegins);
const char * szTmp = sName.cstr()+iStartLen;
while ( *szTmp && *szTmp!='_' )
szTmp++;
if ( *szTmp )
szTmp++;
CSphString sField = sName.SubString ( iStartLen, szTmp-sName.cstr()-iStartLen-1 );
CSphString sOption = szTmp;
SnippetLimits_t tLimits;
if ( !ParseSnippetLimit ( sOption, iVal, tLimits ) )
{
sError.SetSprintf ( "unknown option %s in %s", sName.cstr(), sOption.cstr() );
return false;
}
tOpt.m_hPerFieldLimits.AddUnique(sField) = tLimits;
return true;
}
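	// e.g. (illustrative) the option "__title_limit_words=5" splits into field "title" and
	// per-field limit "limit_words", stored as tOpt.m_hPerFieldLimits["title"].m_iLimitWords = 5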
if ( sName=="before_match" ) tOpt.m_sBeforeMatch = sVal;
else if ( sName=="after_match" ) tOpt.m_sAfterMatch = sVal;
else if ( sName=="chunk_separator" || sName=="snippet_separator") tOpt.m_sChunkSeparator = sVal;
else if ( sName=="field_separator" ) tOpt.m_sFieldSeparator = sVal;
else if ( sName=="around" ) tOpt.m_iAround = iVal;
else if ( sName=="use_boundaries" ) tOpt.m_bUseBoundaries = bVal;
else if ( sName=="weight_order" ) tOpt.m_bWeightOrder = bVal;
else if ( sName=="force_all_words" ) tOpt.m_bForceAllWords = bVal;
else if ( sName=="start_passage_id" || sName=="start_snippet_id") tOpt.m_iPassageId = iVal;
else if ( sName=="load_files" ) tOpt.m_uFilesMode |= bVal ? 1 : 0;
else if ( sName=="load_files_scattered" ) tOpt.m_uFilesMode |= bVal ? 2 : 0;
else if ( sName=="html_strip_mode" ) tOpt.m_sStripMode = sVal;
else if ( sName=="allow_empty" ) tOpt.m_bAllowEmpty = bVal;
else if ( sName=="emit_zones" ) tOpt.m_bEmitZones = bVal;
else if ( sName=="force_passages" || sName=="force_snippets" ) tOpt.m_bForcePassages = bVal;
else if ( sName=="passage_boundary" || sName=="snippet_boundary" ) tOpt.m_ePassageSPZ = GetPassageBoundary(sVal);
else if ( sName=="json_query" ) tOpt.m_bJsonQuery = bVal;
else if ( sName=="pack_fields" ) tOpt.m_bPackFields = bVal;
else if ( sName=="limits_per_field" ) tOpt.m_bLimitsPerField = bVal;
else if ( sName=="exact_phrase" )
{
sError.SetSprintf ( "exact_phrase option is deprecated" );
return false;
}
else if ( sName=="query_mode" )
{
if ( !bVal )
{
sError.SetSprintf ( "query_mode=0 is deprecated" );
return false;
}
}
else
{
sError.SetSprintf ( "Unknown option: %s", sName.cstr() );
return false;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
class QueryExprTraits_c
{
public:
QueryExprTraits_c ( ISphExpr * pQuery );
QueryExprTraits_c ( const QueryExprTraits_c & rhs );
bool UpdateQuery ( const CSphMatch & tMatch ) const;
const CSphString & GetQuery() const { return m_sQuery; }
bool Command ( ESphExprCommand eCmd, void * pArg );
private:
CSphRefcountedPtr<ISphExpr> m_pQuery;
mutable bool m_bFirstQuery = true;
mutable CSphString m_sQuery;
CSphString FetchQuery ( const CSphMatch & tMatch ) const;
};
QueryExprTraits_c::QueryExprTraits_c ( ISphExpr * pQuery )
: m_pQuery ( pQuery )
{
if ( m_pQuery )
SafeAddRef(m_pQuery);
}
QueryExprTraits_c::QueryExprTraits_c ( const QueryExprTraits_c & rhs )
: m_pQuery ( SafeClone ( rhs.m_pQuery ) )
{}
bool QueryExprTraits_c::UpdateQuery ( const CSphMatch & tMatch ) const
{
CSphString sQuery = FetchQuery(tMatch);
if ( m_bFirstQuery || m_sQuery!=sQuery )
{
m_bFirstQuery = false;
m_sQuery = sQuery;
return true;
}
return false;
}
CSphString QueryExprTraits_c::FetchQuery ( const CSphMatch & tMatch ) const
{
if ( !m_pQuery )
return m_sQuery;
CSphString sQuery;
char * pWords;
int iQueryLen = m_pQuery->StringEval ( tMatch, (const BYTE**)&pWords );
if ( m_pQuery->IsDataPtrAttr() )
sQuery.Adopt ( &pWords );
else
sQuery.SetBinary ( pWords, iQueryLen );
return sQuery;
}
bool QueryExprTraits_c::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( m_pQuery )
m_pQuery->Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_QUERY && !m_pQuery ) // don't do this if we have a query expression specified
{
CSphString sQuery ( (const char*)pArg );
if ( m_bFirstQuery || m_sQuery!=sQuery )
{
m_sQuery = sQuery;
m_bFirstQuery = false;
return true;
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////
class Expr_HighlightTraits_c : public ISphStringExpr, public QueryExprTraits_c
{
public:
Expr_HighlightTraits_c ( const CSphIndex * pIndex, QueryProfile_c * pProfiler, ISphExpr * pQuery );
void Command ( ESphExprCommand eCmd, void * pArg ) override;
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override;
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) override;
bool IsDataPtrAttr () const final { return true; }
protected:
CSphRefcountedPtr<ISphExpr> m_pArgs;
CSphRefcountedPtr<ISphExpr> m_pText;
CSphString m_sTextAttr;
CSphVector<int> m_dRequestedFieldIds;
const CSphIndex * m_pIndex = nullptr;
QueryProfile_c * m_pProfiler = nullptr;
SnippetQuerySettings_t m_tSnippetQuery;
std::unique_ptr<SnippetBuilder_c> m_pSnippetBuilder;
Expr_HighlightTraits_c ( const Expr_HighlightTraits_c & rhs );
void SetTextExpr ( ISphExpr * pExpr, const ISphSchema * pRsetSchema );
};
Expr_HighlightTraits_c::Expr_HighlightTraits_c ( const CSphIndex * pIndex, QueryProfile_c * pProfiler, ISphExpr * pQuery )
: QueryExprTraits_c ( pQuery )
, m_pIndex ( pIndex )
, m_pProfiler ( pProfiler )
, m_pSnippetBuilder { std::make_unique<SnippetBuilder_c>() }
{}
Expr_HighlightTraits_c::Expr_HighlightTraits_c ( const Expr_HighlightTraits_c & rhs )
: QueryExprTraits_c ( rhs )
, m_pArgs ( SafeClone ( rhs.m_pArgs ) )
, m_pText ( SafeClone ( rhs.m_pText ) )
, m_dRequestedFieldIds ( rhs.m_dRequestedFieldIds )
, m_pIndex ( rhs.m_pIndex )
, m_pProfiler ( rhs.m_pProfiler )
, m_tSnippetQuery ( rhs.m_tSnippetQuery )
, m_pSnippetBuilder { std::make_unique<SnippetBuilder_c>() }
{
m_pSnippetBuilder->Setup ( m_pIndex, m_tSnippetQuery );
}
void Expr_HighlightTraits_c::FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
if ( !m_pText )
return;
// when the highlight expression was created, m_pText was a GetColumnarString expression,
// but when the result set is converted to a dynamic schema, m_pText becomes a plain GetString expression;
// we need to update the m_pText pointer accordingly
if ( !m_pText->IsColumnar() )
{
m_pText->FixupLocator ( pOldSchema, pNewSchema );
return;
}
m_pText->Command ( SPH_EXPR_GET_COLUMNAR_COL, &m_sTextAttr );
const CSphColumnInfo * pAttr = pNewSchema->GetAttr ( m_sTextAttr.cstr() );
assert(pAttr);
m_pText = pAttr->m_pExpr;
}
void Expr_HighlightTraits_c::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( m_pArgs )
m_pArgs->Command ( eCmd, pArg );
if ( m_pText )
m_pText->Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_GET_DEPENDENT_COLS && m_pText && m_pText->IsColumnar() )
{
assert ( !m_sTextAttr.IsEmpty() );
static_cast<StrVec_t*>(pArg)->Add(m_sTextAttr);
}
if ( QueryExprTraits_c::Command ( eCmd, pArg ) )
{
// fixme! handle errors
CSphString sError;
m_pSnippetBuilder->SetQuery ( GetQuery(), false, sError );
}
}
uint64_t Expr_HighlightTraits_c::GetHash ( const ISphSchema &, uint64_t, bool & )
{
assert ( 0 && "no highlighting in filters" );
return 0;
}
void Expr_HighlightTraits_c::SetTextExpr ( ISphExpr * pExpr, const ISphSchema * pRsetSchema )
{
m_pText = pExpr;
SafeAddRef(m_pText);
if ( m_pText && m_pText->IsColumnar() )
m_pText->Command ( SPH_EXPR_GET_COLUMNAR_COL, &m_sTextAttr );
}
//////////////////////////////////////////////////////////////////////////
class Expr_Snippet_c : public Expr_HighlightTraits_c
{
public:
Expr_Snippet_c ( ISphExpr * pArglist, const CSphIndex * pIndex, const ISphSchema * pRsetSchema, QueryProfile_c * pProfiler, QueryType_e eQueryType, CSphString & sError );
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override;
ISphExpr * Clone() const override;
uint64_t GetHash ( const ISphSchema & tSchema, uint64_t uPrevHash, bool & bDisable ) override;
};
Expr_Snippet_c::Expr_Snippet_c ( ISphExpr * pArglist, const CSphIndex * pIndex, const ISphSchema * pRsetSchema, QueryProfile_c * pProfiler, QueryType_e eQueryType, CSphString & sError )
: Expr_HighlightTraits_c ( pIndex, pProfiler, pArglist->GetArg(1) )
{
m_pArgs = pArglist;
SafeAddRef ( m_pArgs );
assert ( m_pArgs->IsArglist() );
SetTextExpr ( pArglist->GetArg(0), pRsetSchema );
CSphMatch tDummy;
char * pWords;
for ( int i = 2; i < pArglist->GetNumArgs(); i++ )
{
assert ( !pArglist->GetArg(i)->IsDataPtrAttr() ); // aware of memleaks potentially caused by StringEval()
int iLen = pArglist->GetArg(i)->StringEval ( tDummy, (const BYTE**)&pWords );
if ( !pWords || !iLen )
continue;
CSphString sArgs;
sArgs.SetBinary ( pWords, iLen );
pWords = const_cast<char *> ( sArgs.cstr() );
const char * sEnd = pWords + iLen;
while ( pWords<sEnd && *pWords && sphIsSpace ( *pWords ) ) pWords++;
char * szOption = pWords;
while ( pWords<sEnd && *pWords && sphIsAlpha ( *pWords ) ) pWords++;
char * szOptEnd = pWords;
while ( pWords<sEnd && *pWords && sphIsSpace ( *pWords ) ) pWords++;
if ( *pWords++!='=' )
{
sError.SetSprintf ( "Error parsing SNIPPET options: %s", pWords );
return;
}
*szOptEnd = '\0';
while ( pWords<sEnd && *pWords && sphIsSpace ( *pWords ) ) pWords++;
char * sValue = pWords;
if ( !*sValue )
{
sError.SetSprintf ( "Error parsing SNIPPET options" );
return;
}
while ( pWords<sEnd && *pWords ) pWords++;
int iStrValLen = pWords - sValue;
CSphNamedVariant tVariant;
tVariant.m_sKey = szOption;
tVariant.m_sValue.SetBinary ( sValue, iStrValLen );
tVariant.m_iValue = StringBinary2Number ( sValue, iStrValLen );
if ( !ParseSnippetOption ( tVariant, m_tSnippetQuery, sError ) )
return;
}
m_tSnippetQuery.m_bJsonQuery = eQueryType==QUERY_JSON;
m_tSnippetQuery.Setup();
m_pSnippetBuilder->Setup ( m_pIndex, m_tSnippetQuery );
m_dRequestedFieldIds.Add(0);
}
int Expr_Snippet_c::StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const
{
CSphScopedProfile tProf ( m_pProfiler, SPH_QSTATE_SNIPPET );
*ppStr = nullptr;
const BYTE * szSource = nullptr;
int iLen = m_pText->StringEval ( tMatch, &szSource );
// kinda like a scoped ptr, but for an array
CSphFixedVector<BYTE> tScoped {0};
if ( m_pText->IsDataPtrAttr() )
tScoped.Set ( (BYTE *)szSource, iLen );
if ( !iLen )
return 0;
if ( UpdateQuery(tMatch) )
{
CSphString sError;
if ( !m_pSnippetBuilder->SetQuery ( GetQuery(), true, sError ) )
return 0;
}
std::unique_ptr<TextSource_i> pSource = CreateSnippetSource ( m_tSnippetQuery.m_uFilesMode, szSource, iLen );
// FIXME! fill in all the missing options; use consthash?
SnippetResult_t tRes;
if ( !m_pSnippetBuilder->Build ( pSource, tRes ) )
return 0;
CSphVector<BYTE> dRes = m_pSnippetBuilder->PackResult ( tRes, m_dRequestedFieldIds );
int iResultLength = dRes.GetLength();
*ppStr = dRes.LeakData();
return iResultLength;
}
uint64_t Expr_Snippet_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_Snippet_c");
return CALC_DEP_HASHES();
}
ISphExpr * Expr_Snippet_c::Clone () const
{
return new Expr_Snippet_c ( *this );
}
//////////////////////////////////////////////////////////////////////////
class Expr_Highlight_c final : public Expr_HighlightTraits_c
{
public:
Expr_Highlight_c ( ISphExpr * pArglist, const CSphIndex * pIndex, const ISphSchema * pRsetSchema, QueryProfile_c * pProfiler, QueryType_e eQueryType, CSphString & sError );
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final;
void Command ( ESphExprCommand eCmd, void * pArg ) final;
ISphExpr * Clone() const final;
uint64_t GetHash ( const ISphSchema & tSchema, uint64_t uPrevHash, bool & bDisable ) override;
private:
DocstoreSession_c::InfoDocID_t m_tSession;
CSphVector<int> m_dFieldsToFetch;
bool m_bFetchAllFields = false;
Expr_Highlight_c ( const Expr_Highlight_c & rhs );
bool FetchFieldsFromDocstore ( DocstoreDoc_t & tFetchedDoc, DocID_t & tDocID ) const;
CSphVector<FieldSource_t> RearrangeFetchedFields ( const DocstoreDoc_t & tFetchedDoc ) const;
void ParseFields ( ISphExpr * pExpr );
bool ParseOptions ( const VecTraits_T<CSphNamedVariant> & dMap, CSphString & sError );
bool MarkRequestedFields ( CSphString & sError );
void MarkAllFields();
};
Expr_Highlight_c::Expr_Highlight_c ( ISphExpr * pArglist, const CSphIndex * pIndex, const ISphSchema * pRsetSchema, QueryProfile_c * pProfiler, QueryType_e eQueryType, CSphString & sError )
: Expr_HighlightTraits_c ( pIndex, pProfiler, ( pArglist && pArglist->IsArglist() && pArglist->GetNumArgs()==3 ) ? pArglist->GetArg(2) : nullptr )
{
assert ( m_pIndex );
if ( pArglist && pArglist->IsArglist() )
{
m_pArgs = pArglist;
SafeAddRef(m_pArgs);
}
int iNumArgs = pArglist ? ( pArglist->IsArglist() ? pArglist->GetNumArgs() : 1 ) : 0;
if ( iNumArgs>=1 )
{
// this should be a map argument; at least we checked that in ExprHook_c::GetReturnType
auto pMapArg = (Expr_MapArg_c *)(pArglist->IsArglist() ? pArglist->GetArg(0) : pArglist);
assert(pMapArg);
VecTraits_T<CSphNamedVariant> dOpts ( pMapArg->m_pValues, pMapArg->m_iCount );
if ( !ParseOptions ( dOpts, sError ) )
return;
}
if ( iNumArgs>=2 )
{
assert ( pArglist && pArglist->IsArglist() );
ISphExpr * pArg2 = pArglist->GetArg(1);
// mode 1: it is a list of stored fields
// mode 2: it is an expression that needs to be evaluated
if ( pArg2->IsConst() )
ParseFields(pArg2);
else
{
SetTextExpr ( pArg2, pRsetSchema );
m_dRequestedFieldIds.Add(0);
}
}
else
MarkAllFields();
m_tSnippetQuery.m_bJsonQuery = eQueryType==QUERY_JSON;
m_tSnippetQuery.Setup();
m_pSnippetBuilder->Setup ( m_pIndex, m_tSnippetQuery );
}
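// Illustrative call forms handled by this ctor (argument shapes only, not authoritative syntax):
// HIGHLIGHT() - no args, highlight all stored fields; HIGHLIGHT({limit=50}, 'title,body') - an
// options map plus a const list of stored fields; HIGHLIGHT({}, body, 'query') - an options map,
// a text expression to evaluate, and an explicit query expression.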
int Expr_Highlight_c::StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const
{
CSphScopedProfile tProf ( m_pProfiler, SPH_QSTATE_SNIPPET );
DocID_t tDocID = sphGetDocID ( tMatch.m_pDynamic ? tMatch.m_pDynamic : tMatch.m_pStatic );
if ( UpdateQuery(tMatch) )
{
CSphString sError;
if ( !m_pSnippetBuilder->SetQuery ( GetQuery(), true, sError ) )
return 0;
}
DocstoreDoc_t tFetchedDoc;
CSphVector<FieldSource_t> dAllFields;
std::unique_ptr<TextSource_i> pSource;
CSphFixedVector<BYTE> tScoped {0}; // scoped array ptr
if ( m_pText )
{
// highlight an expression
const BYTE * szSource = nullptr;
int iLen = m_pText->StringEval ( tMatch, &szSource );
if ( m_pText->IsDataPtrAttr() )
tScoped.Set ( (BYTE *)szSource, iLen );
pSource = CreateSnippetSource ( m_tSnippetQuery.m_uFilesMode, szSource, iLen );
}
else
{
// fetch fields and highlight
if ( !FetchFieldsFromDocstore ( tFetchedDoc, tDocID ) )
return 0;
dAllFields = RearrangeFetchedFields ( tFetchedDoc );
pSource = CreateHighlightSource ( dAllFields );
}
SnippetResult_t tRes;
if ( !m_pSnippetBuilder->Build ( pSource, tRes ) )
return 0;
CSphVector<BYTE> dPacked = m_pSnippetBuilder->PackResult ( tRes, m_dRequestedFieldIds );
int iResultLength = dPacked.GetLength();
*ppStr = dPacked.LeakData();
return iResultLength;
}
void Expr_Highlight_c::Command ( ESphExprCommand eCmd, void * pArg )
{
Expr_HighlightTraits_c::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_DOCSTORE_DOCID )
{
const DocstoreSession_c::InfoDocID_t & tSession = *(DocstoreSession_c::InfoDocID_t*)pArg;
bool bMark = tSession.m_pDocstore!=m_tSession.m_pDocstore;
m_tSession = tSession;
if ( bMark )
{
// fixme! handle errors
CSphString sError;
MarkRequestedFields(sError);
}
}
}
ISphExpr * Expr_Highlight_c::Clone () const
{
return new Expr_Highlight_c ( *this );
}
Expr_Highlight_c::Expr_Highlight_c ( const Expr_Highlight_c& rhs )
: Expr_HighlightTraits_c ( rhs )
{}
bool Expr_Highlight_c::FetchFieldsFromDocstore ( DocstoreDoc_t & tFetchedDoc, DocID_t & tDocID ) const
{
if ( !m_tSession.m_pDocstore )
return false;
const CSphVector<int> * pFieldsToFetch = m_bFetchAllFields ? nullptr : &m_dFieldsToFetch;
return m_tSession.m_pDocstore->GetDoc ( tFetchedDoc, tDocID, pFieldsToFetch, m_tSession.m_iSessionId, false );
}
CSphVector<FieldSource_t> Expr_Highlight_c::RearrangeFetchedFields ( const DocstoreDoc_t & tFetchedDoc ) const
{
// we need to arrange the fetched fields as in the original index schema,
// so that field matching works as expected
CSphVector<FieldSource_t> dAllFields;
const CSphSchema & tSchema = m_pIndex->GetMatchSchema();
for ( int i = 0; i < tSchema.GetFieldsCount(); i++ )
{
const CSphColumnInfo & tInfo = tSchema.GetField(i);
FieldSource_t & tNewField = dAllFields.Add();
tNewField.m_sName = tInfo.m_sName;
if ( !( tInfo.m_uFieldFlags & CSphColumnInfo::FIELD_STORED ) )
continue;
int iFieldId = m_tSession.m_pDocstore->GetFieldId ( tInfo.m_sName, DOCSTORE_TEXT );
assert ( iFieldId!=-1 );
int iFetchedFieldId = -1;
if ( m_bFetchAllFields )
iFetchedFieldId = iFieldId;
else
{
int * pFound = sphBinarySearch ( m_dFieldsToFetch.Begin(), m_dFieldsToFetch.Begin()+m_dFieldsToFetch.GetLength()-1, iFieldId );
if ( pFound )
iFetchedFieldId = pFound-m_dFieldsToFetch.Begin();
}
if ( iFetchedFieldId!=-1 )
tNewField.m_dData = tFetchedDoc.m_dFields[iFetchedFieldId].Slice();
}
return dAllFields;
}
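// Illustrative mapping for the binary search above: with m_dFieldsToFetch = {0, 2, 5} (sorted
// docstore field ids), a stored field whose docstore id is 5 is found at offset 2, so its text
// is taken from tFetchedDoc.m_dFields[2].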
uint64_t Expr_Highlight_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_Highlight_c");
return CALC_DEP_HASHES();
}
void Expr_Highlight_c::ParseFields ( ISphExpr * pExpr )
{
assert ( pExpr && !pExpr->IsDataPtrAttr() );
assert(m_pIndex);
CSphString sFields;
char * szFields;
CSphMatch tDummy;
int iLen = pExpr->StringEval ( tDummy, (const BYTE**)&szFields );
sFields.SetBinary ( szFields, iLen );
sFields.ToLower();
sFields.Trim();
StrVec_t dRequestedFieldNames;
sphSplit ( dRequestedFieldNames, sFields.cstr() );
if ( !dRequestedFieldNames.GetLength() && sFields.IsEmpty() )
MarkAllFields();
else
{
const CSphSchema & tSchema = m_pIndex->GetMatchSchema();
for ( const auto & i : dRequestedFieldNames )
{
int iField = tSchema.GetFieldIndex ( i.cstr() );
if ( iField!=-1 )
m_dRequestedFieldIds.Add(iField);
}
}
}
void Expr_Highlight_c::MarkAllFields()
{
m_bFetchAllFields = true;
m_dFieldsToFetch.Resize(0);
const CSphSchema & tSchema = m_pIndex->GetMatchSchema();
for ( int i = 0; i < tSchema.GetFieldsCount(); i++ )
m_dRequestedFieldIds.Add(i);
}
bool Expr_Highlight_c::MarkRequestedFields ( CSphString & sError )
{
m_dFieldsToFetch.Resize(0);
bool bResult = true;
if ( !m_bFetchAllFields )
{
assert ( m_tSession.m_pDocstore );
const CSphSchema & tSchema = m_pIndex->GetMatchSchema();
for ( auto iField : m_dRequestedFieldIds )
{
const char * szField = tSchema.GetFieldName(iField);
int iDocstoreField = m_tSession.m_pDocstore->GetFieldId ( szField, DOCSTORE_TEXT );
if ( iDocstoreField==-1 )
{
sError.SetSprintf ( "field %s not found", szField );
bResult = false;
continue;
}
m_dFieldsToFetch.Add(iDocstoreField);
}
m_dFieldsToFetch.Uniq();
}
return bResult;
}
bool Expr_Highlight_c::ParseOptions ( const VecTraits_T<CSphNamedVariant> & dMap, CSphString & sError )
{
for ( const auto & i : dMap )
{
if ( !ParseSnippetOption ( i, m_tSnippetQuery, sError ) )
return false;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
int ExprHook_c::IsKnownFunc ( const char * sFunc ) const
{
if ( !strcasecmp ( sFunc, "SNIPPET" ) )
return HOOK_SNIPPET;
if ( !strcasecmp ( sFunc, "HIGHLIGHT" ) )
return HOOK_HIGHLIGHT;
return -1;
}
ISphExpr * ExprHook_c::CreateNode ( int iID, ISphExpr * pLeft, const ISphSchema * pRsetSchema, ESphEvalStage * pEvalStage, bool * pNeedDocIds, CSphString & sError )
{
if ( pEvalStage )
*pEvalStage = SPH_EVAL_POSTLIMIT;
if ( pNeedDocIds )
*pNeedDocIds = true;
CSphRefcountedPtr<ISphExpr> pRes;
switch ( iID )
{
case HOOK_SNIPPET:
pRes = new Expr_Snippet_c ( pLeft, m_pIndex, pRsetSchema, m_pProfiler, m_eQueryType, sError );
break;
case HOOK_HIGHLIGHT:
pRes = new Expr_Highlight_c ( pLeft, m_pIndex, pRsetSchema, m_pProfiler, m_eQueryType, sError );
break;
default:
assert ( 0 && "Unknown node type" );
return nullptr;
}
if ( !sError.IsEmpty() )
return nullptr;
return pRes.Leak();
}
ESphAttr ExprHook_c::GetIdentType ( int ) const
{
assert(0);
return SPH_ATTR_NONE;
}
ESphAttr ExprHook_c::GetReturnType ( int iID, const CSphVector<ESphAttr> & dArgs, bool, CSphString & sError ) const
{
switch ( iID )
{
case HOOK_SNIPPET:
if ( dArgs.GetLength()<2 )
{
sError = "SNIPPET() requires 2 or more arguments";
return SPH_ATTR_NONE;
}
if ( dArgs[0]!=SPH_ATTR_STRINGPTR && dArgs[0]!=SPH_ATTR_STRING )
{
sError = "1st argument to SNIPPET() must be a string expression";
return SPH_ATTR_NONE;
}
for ( int i = 1; i < dArgs.GetLength(); i++ )
if ( dArgs[i]!=SPH_ATTR_STRING && dArgs[i]!=SPH_ATTR_STRINGPTR )
{
sError.SetSprintf ( "%d argument to SNIPPET() must be a string", i );
return SPH_ATTR_NONE;
}
break;
case HOOK_HIGHLIGHT:
if ( dArgs.GetLength()>3 )
{
sError = "HIGHLIGHT() requires 0-3 arguments";
return SPH_ATTR_NONE;
}
if ( dArgs.GetLength()>0 && dArgs[0]!=SPH_ATTR_MAPARG )
{
sError = "1st argument to HIGHLIGHT() must be a map";
return SPH_ATTR_NONE;
}
if ( dArgs.GetLength()>1 && dArgs[1]!=SPH_ATTR_STRING && dArgs[1]!=SPH_ATTR_STRINGPTR )
{
sError = "2nd argument to HIGHLIGHT() must be a string";
return SPH_ATTR_NONE;
}
if ( dArgs.GetLength()>2 && dArgs[2]!=SPH_ATTR_STRING && dArgs[2]!=SPH_ATTR_STRINGPTR )
{
sError = "3rd argument to HIGHLIGHT() must be a string";
return SPH_ATTR_NONE;
}
break;
}
return SPH_ATTR_STRINGPTR;
}
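// Signatures accepted by the checks above (illustrative; the option strings
// are hypothetical examples, not an exhaustive list):
// SNIPPET(body, 'my query') -> STRINGPTR
// SNIPPET(body, 'my query', 'limit=120') -> STRINGPTR
// HIGHLIGHT() -> STRINGPTR
// HIGHLIGHT({limit=120}, 'title,body') -> STRINGPTR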
| size: 23,654 | language: C++ | extension: .cpp | total_lines: 672 | avg_line_length: 32.64881 | max_line_length: 189 | alphanum_fraction: 0.69911 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicates (all exact/near flags): false |
id: 16,865 | file_name: dynamic_idx.cpp | file_path: manticoresoftware_manticoresearch/src/dynamic_idx.cpp | content:
//
// Copyright (c) 2020, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "dynamic_idx.h"
#include "sphinxsort.h"
#include "querycontext.h"
using namespace Threads;
class Feeder_c : public RowBuffer_i
{
CSphSchema* m_pSchema = nullptr;
CSphMatch* m_pMatch = nullptr;
Resumer_fn m_fnCoro;
bool m_bCoroFinished = false;
bool m_bHaveMoreMatches = true;
bool m_bAutoID = true;
int m_iCurCol = 0;
int m_iCurMatch = 1;
bool CallCoro()
{
if ( !m_bCoroFinished )
m_bCoroFinished = m_fnCoro ();
return m_bCoroFinished;
}
const CSphColumnInfo & GetNextCol()
{
assert ( m_pMatch );
assert ( m_iCurCol>=0 );
const CSphColumnInfo & dColQuery = m_pSchema->GetAttr ( m_iCurCol );
++m_iCurCol;
return dColQuery;
}
// insert column into schema
void ColSchema ( const char * szName, MysqlColumnType_e uType )
{
ESphAttr eType = SPH_ATTR_STRINGPTR;
switch ( uType )
{
case MYSQL_COL_LONGLONG: eType = SPH_ATTR_BIGINT; break;
case MYSQL_COL_LONG: eType = SPH_ATTR_INTEGER; break;
case MYSQL_COL_FLOAT: eType = SPH_ATTR_FLOAT; break;
case MYSQL_COL_DOUBLE: eType = SPH_ATTR_DOUBLE; break;
default: break;
}
CSphString sName ( szName );
sName.ToLower();
if ( m_pSchema->GetAttrIndex ( sName.cstr() )<0 )
m_pSchema->AddAttr ( CSphColumnInfo ( szName, eType ), true );
else {
assert ( sName=="id");
m_bAutoID = false;
}
}
public:
explicit Feeder_c ( TableFeeder_fn fnFeed )
{
m_fnCoro = MakeCoroExecutor ( [this, fnFeed = std::move ( fnFeed )] () { fnFeed ( this ); } );
}
~Feeder_c() override
{
while ( !m_bCoroFinished )
CallCoro ();
}
// collecting schema
void SetSchema ( CSphSchema * pSchema )
{
m_pSchema = pSchema;
CallCoro(); // when this returns, fnCoro is suspended just before returning from HeadEnd().
}
// set upstream match
void SetSorterStuff ( CSphMatch * pMatch )
{
m_pMatch = pMatch;
}
bool FillNextMatch()
{
if ( m_bHaveMoreMatches )
{
m_iCurCol = 0;
if ( m_bAutoID )
{
auto * pID = m_pSchema->GetAttr ( sphGetDocidName () );
m_pMatch->SetAttr ( pID->m_tLocator, m_iCurMatch );
++m_iCurCol;
}
++m_iCurMatch;
CallCoro ();
}
return m_bHaveMoreMatches;
}
void PutStr ( const CSphColumnInfo & tCol, const StringBuilder_c & sMsg )
{
assert ( m_pMatch );
assert ( tCol.m_eAttrType == SPH_ATTR_STRINGPTR );
BYTE * pData = nullptr;
m_pMatch->SetAttr ( tCol.m_tLocator, (SphAttr_t) sphPackPtrAttr ( sMsg.GetLength (), &pData ) );
memcpy ( pData, sMsg.cstr (), sMsg.GetLength () );
}
public:
// Header of the table with defined num of columns
inline void HeadBegin () override
{
if ( !m_pSchema )
return;
// add id column
ColSchema ( sphGetDocidName (), MYSQL_COL_LONGLONG );
}
// add the next column.
void HeadColumn ( const char * sName, MysqlColumnType_e uType ) override
{
if ( m_pSchema )
ColSchema ( sName, uType );
}
bool HeadEnd ( bool bMoreResults, int iWarns ) override
{
if ( !m_pSchema )
{
assert (false && "dynamic table invoked without parent schema");
return false;
}
Coro::Yield_();
return true;
}
// match constructing routines
void PutFloatAsString ( float fVal, const char * sFormat ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttrFloat ( tCol.m_tLocator, fVal );
else
{
StringBuilder_c sData;
sData.Appendf ( "%f", fVal );
PutStr ( tCol, sData );
}
}
void PutDoubleAsString ( double fVal, const char * szFormat ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttrDouble ( tCol.m_tLocator, fVal );
else
{
StringBuilder_c sData;
sData.Appendf ( "%f", fVal );
PutStr ( tCol, sData );
}
}
void PutPercentAsString ( int64_t iVal, int64_t iBase ) override
{
if ( iBase )
PutFloatAsString ( iVal * 100.0f / iBase, nullptr );
else
PutFloatAsString ( 100.0f, nullptr );
}
void PutNumAsString ( int64_t iVal ) override
{
if ( !m_pMatch )
return;
auto& tCol = GetNextCol();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttr ( tCol.m_tLocator, iVal );
else
{
StringBuilder_c sData;
sData << iVal;
PutStr ( tCol, sData );
}
}
void PutNumAsString ( uint64_t uVal ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttr ( tCol.m_tLocator, uVal );
else
{
StringBuilder_c sData;
sData << uVal;
PutStr ( tCol, sData );
}
}
void PutNumAsString ( int iVal ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttr ( tCol.m_tLocator, iVal );
else
{
StringBuilder_c sData;
sData << iVal;
PutStr ( tCol, sData );
}
}
void PutNumAsString ( DWORD uVal ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttr ( tCol.m_tLocator, uVal );
else
{
StringBuilder_c sData;
sData << uVal;
PutStr ( tCol, sData );
}
}
void PutArray ( const ByteBlob_t&, bool ) override {}
// pack string
void PutString ( Str_t sMsg ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
BYTE * pData = nullptr;
tMatch.SetAttr ( tCol.m_tLocator, (SphAttr_t) sphPackPtrAttr ( sMsg.second, &pData ) );
memcpy ( pData, sMsg.first, sMsg.second );
}
void PutMicrosec ( int64_t iUsec ) override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType == SPH_ATTR_FLOAT )
{
auto fSec = (float)iUsec / 1000000.0f;
tMatch.SetAttr ( tCol.m_tLocator, sphF2DW ( fSec ) );
} else if ( tCol.m_eAttrType==SPH_ATTR_STRINGPTR )
{
StringBuilder_c sData;
sData << iUsec;
PutStr ( tCol, sData );
} else
tMatch.SetAttr ( tCol.m_tLocator, iUsec );
}
void PutNULL () override
{
if ( !m_pMatch )
return;
auto & tCol = GetNextCol ();
auto & tMatch = *m_pMatch;
if ( tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
tMatch.SetAttr ( tCol.m_tLocator, 0 );
else
{
StringBuilder_c sData;
sData << 0;
PutStr ( tCol, sData );
}
}
public:
/// higher-level: processing whole tables.
// sends collected data, then reset
bool Commit() override
{
Coro::Yield_ ();
return m_bHaveMoreMatches; // true for continue iteration, false to stop
}
// wrappers for popular packets
void Eof ( bool bMoreResults, int iWarns, const char* ) override
{
m_bHaveMoreMatches = false;
m_pMatch = nullptr; // that should stop any further feeding
Coro::Yield_ (); // generally not needed, as EOF is usually the last statement; if it is not, yielding here is still safe
}
using RowBuffer_i::Eof;
void Error ( const char * sError, EMYSQL_ERR ) override
{
m_bError = true;
m_sError = sError;
Eof ();
}
void Ok ( int, int, const char *, bool, int64_t ) override {}
void Add ( BYTE ) override {}
};
// feed schema only and skip all the data
class FeederSchema_c : public RowBuffer_i
{
CSphSchema* m_pSchema = nullptr;
CSphMatch* m_pMatch = nullptr;
Resumer_fn m_fnCoro;
bool m_bCoroFinished = false;
bool m_bHaveMoreMatches = true;
int m_iCurMatch = 1;
bool CallCoro()
{
if ( !m_bCoroFinished )
m_bCoroFinished = m_fnCoro ();
return m_bCoroFinished;
}
void PutString ( int iCol, const char * sMsg )
{
if ( !m_pMatch )
return;
int iLen = ( sMsg && *sMsg ) ? (int) strlen ( sMsg ) : 0;
if ( !sMsg )
sMsg = "";
BYTE * pData = nullptr;
m_pMatch->SetAttr ( m_pSchema->GetAttr ( iCol ).m_tLocator, (SphAttr_t) sphPackPtrAttr ( iLen, &pData ) );
memcpy ( pData, sMsg, iLen );
}
public:
explicit FeederSchema_c ( TableFeeder_fn fnFeed )
{
m_fnCoro = MakeCoroExecutor ( [this, fnFeed = std::move ( fnFeed )] () { fnFeed ( this ); } );
}
~FeederSchema_c() override
{
while ( !m_bCoroFinished )
CallCoro ();
}
// collecting schema
void SetSchema ( CSphSchema * pSchema )
{
m_pSchema = pSchema;
m_pSchema->AddAttr ( CSphColumnInfo ( sphGetDocidName (), SPH_ATTR_BIGINT ), true );
m_pSchema->AddAttr ( CSphColumnInfo ( "Field", SPH_ATTR_STRINGPTR ), true );
m_pSchema->AddAttr ( CSphColumnInfo ( "Type", SPH_ATTR_STRINGPTR ), true );
m_pSchema->AddAttr ( CSphColumnInfo ( "Properties", SPH_ATTR_STRINGPTR ), true );
}
// set upstream match
void SetSorterStuff ( CSphMatch * pMatch )
{
m_pMatch = pMatch;
}
bool FillNextMatch()
{
if ( m_bHaveMoreMatches )
CallCoro ();
return m_bHaveMoreMatches;
}
public:
void HeadBegin () override {}
// add the next column.
void HeadColumn ( const char * sName, MysqlColumnType_e uType ) override
{
if ( !m_pSchema )
return;
if ( !m_pMatch )
return;
// docid
m_pMatch->SetAttr ( m_pSchema->GetAttr ( 0 ).m_tLocator, m_iCurMatch );
++m_iCurMatch;
PutString ( 1, sName );
switch ( uType )
{
case MYSQL_COL_LONGLONG: PutString ( 2, "bigint" ); break;
case MYSQL_COL_LONG: PutString ( 2, "uint" ); break;
case MYSQL_COL_FLOAT: PutString ( 2, "float" ); break;
case MYSQL_COL_DOUBLE: PutString ( 2, "double" ); break;
default: PutString ( 2, "string" ); break;
}
PutString ( 3, "" );
Coro::Yield_ ();
}
bool HeadEnd ( bool bMoreResults, int iWarns ) override
{
if ( !m_pSchema )
{
assert (false && "dynamic table invoked without parent schema");
return false;
}
// fixme!
m_bHaveMoreMatches = false;
m_pMatch = nullptr; // that should stop any further feeding
Coro::Yield_ ();
return false;
}
// match constructing routines (empty for schema only)
void PutFloatAsString ( float, const char * ) override {}
void PutDoubleAsString ( double, const char * ) override {}
void PutPercentAsString ( int64_t, int64_t ) override {}
void PutNumAsString ( int64_t ) override {}
void PutNumAsString ( uint64_t ) override {}
void PutNumAsString ( int ) override {}
void PutNumAsString ( DWORD ) override {}
void PutArray ( const ByteBlob_t&, bool ) override {}
void PutString ( Str_t ) override {}
void PutMicrosec ( int64_t ) override {}
void PutNULL () override {}
bool Commit() override { return false;}
void Eof ( bool, int, const char* ) override {}
using RowBuffer_i::Eof;
void Error ( const char * sError, EMYSQL_ERR ) override
{
m_bError = true;
m_sError = sError;
Eof ();
}
void Ok ( int, int, const char *, bool, int64_t ) override {}
void Add ( BYTE ) override {}
};
class GenericTableIndex_c : public CSphIndexStub
{
public:
GenericTableIndex_c ()
: CSphIndexStub ( "dynamic", nullptr )
{}
bool MultiQuery ( CSphQueryResult & , const CSphQuery & , const VecTraits_T<ISphMatchSorter *> &, const CSphMultiQueryArgs & ) const final;
private:
bool MultiScan ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs ) const;
virtual void SetSorterStuff ( CSphMatch * pMatch ) const = 0;
virtual bool FillNextMatch () const = 0;
virtual Str_t GetErrors() const = 0;
};
bool GenericTableIndex_c::MultiQuery ( CSphQueryResult & tResult, const CSphQuery & tQuery,
const VecTraits_T<ISphMatchSorter *> & dAllSorters, const CSphMultiQueryArgs &tArgs ) const
{
MEMORY ( MEM_DISK_QUERY );
// to avoid checking each ppSorters element for NULL on every step, filter out all the nulls right here
CSphVector<ISphMatchSorter *> dSorters;
dSorters.Reserve ( dAllSorters.GetLength() );
dAllSorters.Apply ([&dSorters] ( ISphMatchSorter* p) { if ( p ) dSorters.Add(p); });
// if we have anything to work with
if ( dSorters.IsEmpty() )
return false;
// non-random at the start, random at the end
dSorters.Sort ( CmpPSortersByRandom_fn () );
const QueryParser_i * pQueryParser = tQuery.m_pQueryParser;
assert ( pQueryParser );
// fast path for scans
if ( pQueryParser->IsFullscan ( tQuery ) )
return MultiScan ( tResult, tQuery, dSorters, tArgs );
return false;
}
class DynMatchProcessor_c : public MatchProcessor_i, ISphNoncopyable
{
public:
DynMatchProcessor_c ( int iTag, const CSphQueryContext &tCtx )
: m_iTag ( iTag )
, m_tCtx ( tCtx )
{}
void Process ( CSphMatch * pMatch ) final { ProcessMatch(pMatch); }
bool ProcessInRowIdOrder() const final { return false; }
void Process ( VecTraits_T<CSphMatch *> & dMatches ) final { dMatches.for_each ( [this]( CSphMatch * pMatch ){ ProcessMatch(pMatch); } ); }
private:
int m_iTag;
const CSphQueryContext & m_tCtx;
inline void ProcessMatch ( CSphMatch * pMatch )
{
if ( pMatch->m_iTag>=0 )
return;
m_tCtx.CalcFinal ( *pMatch );
pMatch->m_iTag = m_iTag;
}
};
bool GenericTableIndex_c::MultiScan ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs ) const
{
assert ( tArgs.m_iTag>=0 );
auto & tMeta = *tResult.m_pMeta;
QueryProfile_c * pProfiler = tMeta.m_pProfile;
// we count documents only (before filters)
if ( tQuery.m_iMaxPredictedMsec )
tMeta.m_bHasPrediction = true;
if ( tArgs.m_uPackedFactorFlags & SPH_FACTOR_ENABLE )
tMeta.m_sWarning.SetSprintf ( "packedfactors() will not work with a fullscan; you need to specify a query" );
// start counting
int64_t tmQueryStart = sphMicroTimer ();
MiniTimer_c dTimerGuard;
int64_t tmMaxTimer = dTimerGuard.Engage ( tQuery.m_uMaxQueryMsec ); // max_query_time
// select the sorter with max schema
// uses GetAttrsCount to get working facets (was GetRowSize)
int iMaxSchemaIndex = GetMaxSchemaIndexAndMatchCapacity ( dSorters ).first;
const ISphSchema & tMaxSorterSchema = *( dSorters[iMaxSchemaIndex]->GetSchema ());
auto dSorterSchemas = SorterSchemas ( dSorters, iMaxSchemaIndex );
// setup calculations and result schema
CSphQueryContext tCtx ( tQuery );
if ( !tCtx.SetupCalc ( tMeta, tMaxSorterSchema, m_tSchema, nullptr, nullptr, dSorterSchemas ) )
return false;
// setup filters
CreateFilterContext_t tFlx;
tFlx.m_pFilters = &tQuery.m_dFilters;
tFlx.m_pFilterTree = &tQuery.m_dFilterTree;
tFlx.m_pMatchSchema = &tMaxSorterSchema;
tFlx.m_pIndexSchema = &m_tSchema;
tFlx.m_eCollation = tQuery.m_eCollation;
tFlx.m_bScan = true;
if ( !tCtx.CreateFilters ( tFlx , tMeta.m_sError, tMeta.m_sWarning ) )
return false;
// prepare to process the rows
bool bRandomize = dSorters[0]->IsRandom();
CSphMatch tMatch;
// note: we reserve the dynamic area of the match using the max sorter schema, but then fill it via locators from the index schema.
// that works because the sorter always includes all attrs from the index, leaving the final column selection
// to the result minimizer. If we ever pre-optimize the sorter schema by the select list, it will cause crashes here.
tMatch.Reset ( tMaxSorterSchema.GetDynamicSize () );
tMatch.m_iWeight = tArgs.m_iIndexWeight;
tMatch.m_iTag = tCtx.m_dCalcFinal.GetLength () ? -1 : tArgs.m_iTag;
CSphScopedProfile tProf ( pProfiler, SPH_QSTATE_FULLSCAN );
int iCutoff = ( tQuery.m_iCutoff<=0 ) ? -1 : tQuery.m_iCutoff;
SetSorterStuff ( &tMatch );
Threads::Coro::HighFreqChecker_c fnHeavyCheck;
const int64_t& iCheckTimePoint { Threads::Coro::GetNextTimePointUS() };
while ( FillNextMatch() )
{
++tMeta.m_tStats.m_iFetchedDocs;
tCtx.CalcFilter ( tMatch );
if ( tCtx.m_pFilter && !tCtx.m_pFilter->Eval ( tMatch ) )
{
tCtx.FreeDataFilter ( tMatch );
m_tSchema.FreeDataPtrs ( tMatch );
continue;
}
if ( bRandomize )
tMatch.m_iWeight = ( sphRand () & 0xffff ) * tArgs.m_iIndexWeight;
// submit match to sorters
tCtx.CalcSort ( tMatch );
bool bNewMatch = false;
dSorters.Apply ( [&tMatch, &bNewMatch] ( ISphMatchSorter * p ) { bNewMatch |= p->Push ( tMatch ); } );
// stringptr expressions should be duplicated (or taken over) at this point
tCtx.FreeDataFilter ( tMatch );
tCtx.FreeDataSort ( tMatch );
m_tSchema.FreeDataPtrs ( tMatch );
// handle cutoff
if ( bNewMatch && --iCutoff==0 )
break;
// handle timer
if ( sph::TimeExceeded ( tmMaxTimer ) )
{
tMeta.m_sWarning = "query time exceeded max_query_time";
break;
}
if ( fnHeavyCheck() && sph::TimeExceeded ( iCheckTimePoint ) )
{
if ( session::GetKilled() )
{
tMeta.m_sWarning = "query was killed";
break;
}
Threads::Coro::RescheduleAndKeepCrashQuery();
}
}
auto sErrors = GetErrors();
bool bOk = IsEmpty ( sErrors );
if ( !bOk )
tMeta.m_sError = sErrors;
SwitchProfile ( pProfiler, SPH_QSTATE_FINALIZE );
// do final expression calculations
if ( tCtx.m_dCalcFinal.GetLength () )
{
DynMatchProcessor_c tFinal ( tArgs.m_iTag, tCtx );
dSorters.Apply ( [&] ( ISphMatchSorter * p ) { p->Finalize ( tFinal, false, tArgs.m_bFinalizeSorters ); } );
}
tMeta.m_iQueryTime += ( int ) ( ( sphMicroTimer () - tmQueryStart ) / 1000 );
return bOk;
}
///////////////
/// Index for data flow
class DynamicIndex_c final: public GenericTableIndex_c
{
mutable Feeder_c m_tFeeder;
mutable bool m_bSchemaCreated = false;
public:
explicit DynamicIndex_c ( TableFeeder_fn fnFeed)
: m_tFeeder ( std::move ( fnFeed ) )
{}
const CSphSchema & GetMatchSchema () const final;
protected:
~DynamicIndex_c() final = default;
private:
void SetSorterStuff ( CSphMatch * pMatch ) const final;
bool FillNextMatch () const final;
Str_t GetErrors () const final;
};
const CSphSchema & DynamicIndex_c::GetMatchSchema () const
{
if ( !m_bSchemaCreated )
{
m_tFeeder.SetSchema ( const_cast<CSphSchema *> (&m_tSchema) );
m_bSchemaCreated = true;
}
return m_tSchema;
}
void DynamicIndex_c::SetSorterStuff ( CSphMatch * pMatch ) const
{
assert ( m_bSchemaCreated );
m_tFeeder.SetSorterStuff(pMatch);
}
bool DynamicIndex_c::FillNextMatch () const
{
return m_tFeeder.FillNextMatch();
}
Str_t DynamicIndex_c::GetErrors () const
{
return FromStr ( m_tFeeder.GetError() );
}
///////////////
/// Index for schema data flow
class DynamicIndexSchema_c final : public GenericTableIndex_c
{
mutable FeederSchema_c m_tFeeder;
mutable bool m_bSchemaCreated = false;
public:
explicit DynamicIndexSchema_c ( TableFeeder_fn fnFeed)
: m_tFeeder ( std::move ( fnFeed ) )
{}
const CSphSchema & GetMatchSchema () const final;
protected:
~DynamicIndexSchema_c() final = default;
private:
void SetSorterStuff ( CSphMatch * pMatch ) const final;
bool FillNextMatch () const final;
Str_t GetErrors () const final;
};
const CSphSchema & DynamicIndexSchema_c::GetMatchSchema () const
{
if ( !m_bSchemaCreated )
{
m_tFeeder.SetSchema ( const_cast<CSphSchema *> (&m_tSchema) );
m_bSchemaCreated = true;
}
return m_tSchema;
}
void DynamicIndexSchema_c::SetSorterStuff ( CSphMatch * pMatch ) const
{
assert ( m_bSchemaCreated );
m_tFeeder.SetSorterStuff(pMatch);
}
bool DynamicIndexSchema_c::FillNextMatch () const
{
return m_tFeeder.FillNextMatch();
}
Str_t DynamicIndexSchema_c::GetErrors () const
{
return FromStr ( m_tFeeder.GetError() );
}
static ServedIndexRefPtr_c MakeServed ( CSphIndex* pIndex )
{
auto pServed = MakeServedIndex();
pServed->SetIdx ( std::unique_ptr<CSphIndex> ( pIndex ) );
return pServed;
}
/// external functions
ServedIndexRefPtr_c MakeDynamicIndex ( TableFeeder_fn fnFeed )
{
return MakeServed ( new DynamicIndex_c ( std::move ( fnFeed ) ) );
}
ServedIndexRefPtr_c MakeDynamicIndexSchema ( TableFeeder_fn fnFeed )
{
return MakeServed ( new DynamicIndexSchema_c ( std::move ( fnFeed ) ) );
}
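///////////////
/// Illustrative usage (not part of the original source): feeding a one-row,
/// two-column table through MakeDynamicIndex(). Assumptions: TableFeeder_fn
/// accepts any callable taking RowBuffer_i*, MYSQL_COL_STRING is a member of
/// MysqlColumnType_e, and Str_t is the (pointer, length) pair that PutString()
/// unpacks above.
#if 0 // sketch only
static ServedIndexRefPtr_c MakeDemoTable ()
{
return MakeDynamicIndex ( [] ( RowBuffer_i * pBuf )
{
pBuf->HeadBegin ();
pBuf->HeadColumn ( "name", MYSQL_COL_STRING );
pBuf->HeadColumn ( "value", MYSQL_COL_LONGLONG );
pBuf->HeadEnd ( false, 0 ); // yields; resumed once per fetched match
pBuf->PutString ( { "demo", 4 } ); // Str_t assumed to be (ptr, len) pair
pBuf->PutNumAsString ( int64_t(42) );
pBuf->Commit (); // yields again; a false return stops the iteration
pBuf->Eof ( false, 0, nullptr );
} );
}
#endif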
| size: 20,075 | language: C++ | extension: .cpp | total_lines: 664 | avg_line_length: 27.584337 | max_line_length: 180 | alphanum_fraction: 0.698589 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicates (all exact/near flags): false |
id: 16,866 | file_name: costestimate.cpp | file_path: manticoresoftware_manticoresearch/src/costestimate.cpp | content:
//
// Copyright (c) 2018-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "costestimate.h"
#include "sphinxint.h"
#include "sphinxsort.h"
#include "columnarfilter.h"
#include "secondaryindex.h"
#include "geodist.h"
#include <math.h>
#include "std/sys.h"
static float EstimateMTCost ( float fCost, int iThreads, float fKPerf, float fBPerf )
{
if ( iThreads==1 )
return fCost;
int iMaxThreads = GetNumLogicalCPUs();
float fMaxPerfCoeff = fKPerf*iMaxThreads + fBPerf;
float fMinCost = fCost/fMaxPerfCoeff;
if ( iThreads==iMaxThreads )
return fMinCost;
const float fX1 = 1.0f;
float fX2 = iMaxThreads;
float fY1 = fCost;
float fY2 = fMinCost;
// cost = A/sqrt(num_threads) + B
float fA = ( fY2-fY1 ) / ( 1.0f/float(sqrt(fX2)) - 1.0f/float(sqrt(fX1)) );
float fB = fY1 - fA / float(sqrt(fX1));
float fX = iThreads;
float fY = fA/float(sqrt(fX)) + fB;
return fY;
}
float EstimateMTCost ( float fCost, int iThreads )
{
const float fKPerf = 0.45f;
const float fBPerf = 1.40f;
return EstimateMTCost ( fCost, iThreads, fKPerf, fBPerf );
}
float EstimateMTCostCS ( float fCost, int iThreads )
{
const float fKPerf = 0.16f;
const float fBPerf = 1.38f;
return EstimateMTCost ( fCost, iThreads, fKPerf, fBPerf );
}
float EstimateMTCostSI ( float fCost, int iThreads )
{
const float fKPerf = 0.10f;
const float fBPerf = 1.56f;
return EstimateMTCost ( fCost, iThreads, fKPerf, fBPerf );
}
float EstimateMTCostSIFT ( float fCost, int iThreads )
{
const float fKPerf = 0.235f;
const float fBPerf = 1.25f;
return EstimateMTCost ( fCost, iThreads, fKPerf, fBPerf );
}
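// Worked example for the model above: with fKPerf=0.45, fBPerf=1.40 and 16
// logical CPUs, the best (16-thread) cost is fCost/(0.45*16+1.40) ~= fCost/8.6;
// intermediate thread counts fall on the curve cost = A/sqrt(threads) + B
// fitted through the 1-thread and 16-thread points.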
/////////////////////////////////////////////////////////////////////
class CostEstimate_c : public CostEstimate_i
{
friend float CalcIntersectCost ( int64_t iDocs );
friend float CalcFTIntersectCost ( const NodeEstimate_t & tEst1, const NodeEstimate_t & tEst2, int64_t iTotalDocs, int iDocsPerBlock1, int iDocsPerBlock2 );
public:
CostEstimate_c ( const CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx, int iCutoff );
float CalcQueryCost() final;
private:
static constexpr float SCALE = 1.0f/1000000.0f;
static constexpr float COST_PUSH = 6.0f;
static constexpr float COST_PUSH_IG = 3.0f;
static constexpr float COST_FILTER = 8.5f;
static constexpr float COST_COLUMNAR_FILTER = 4.0f;
static constexpr float COST_INDEX_READ_SINGLE = 4.0f;
static constexpr float COST_INDEX_READ_BITMAP = 4.5f;
static constexpr float COST_INDEX_UNION_COEFF = 4.0f;
static constexpr float COST_LOOKUP_READ = 20.0f;
static constexpr float COST_INDEX_ITERATOR_INIT = 30.0f;
static constexpr float COST_ITERATOR_INTERSECT = 20.0f;
const CSphVector<SecondaryIndexInfo_t> & m_dSIInfo;
const SelectIteratorCtx_t & m_tCtx;
int m_iCutoff = -1;
CSphVector<int> m_dSorted;
static float Cost_Filter ( int64_t iDocs, float fComplexity ) { return COST_FILTER*fComplexity*iDocs*SCALE; }
static float Cost_BlockFilter ( int64_t iDocs, float fComplexity ) { return Cost_Filter ( iDocs/DOCINFO_INDEX_FREQ, fComplexity ); }
static float Cost_ColumnarFilter ( int64_t iDocs, float fComplexity ){ return COST_COLUMNAR_FILTER*fComplexity*iDocs*SCALE; }
static float Cost_Push ( int64_t iDocs ) { return COST_PUSH*iDocs*SCALE; }
static float Cost_PushImplicitGroupby ( int64_t iDocs ) { return COST_PUSH_IG*iDocs*SCALE; }
static float Cost_IndexReadSingle ( int64_t iDocs ) { return COST_INDEX_READ_SINGLE*iDocs*SCALE; }
static float Cost_IndexReadBitmap ( int64_t iDocs ) { return COST_INDEX_READ_BITMAP*iDocs*SCALE; }
static float Cost_IndexUnionQueue ( int64_t iDocs ) { return COST_INDEX_UNION_COEFF*iDocs*log2f(iDocs)*SCALE; }
static float Cost_LookupRead ( int64_t iDocs ) { return COST_LOOKUP_READ*iDocs*SCALE; }
static float Cost_IndexIteratorInit ( int64_t iNumIterators ) { return COST_INDEX_ITERATOR_INIT*iNumIterators*SCALE; }
float CalcFilterCost ( const SecondaryIndexInfo_t & tIndex, const CSphFilterSettings & tFilter, bool bFromIterator, bool bFilterOverExpr, float fDocsLeft ) const;
float CalcLookupCost ( const SecondaryIndexInfo_t & tIndex ) const;
float CalcAnalyzerCost ( const SecondaryIndexInfo_t & tIndex, const CSphFilterSettings & tFilter, float fDocsLeft ) const;
float CalcIndexCost ( const SecondaryIndexInfo_t & tIndex, const CSphFilterSettings & tFilter, float fDocsLeft ) const;
float CalcIteratorIntersectCost ( float fFirstIteratorDocs, int iNumIterators );
float CalcPushCost ( float fDocsAfterFilters ) const;
float CalcMTCost ( float fCost ) const { return EstimateMTCost ( fCost, m_tCtx.m_iThreads );}
float CalcMTCostCS ( float fCost ) const { return EstimateMTCostCS ( fCost, m_tCtx.m_iThreads );}
float CalcMTCostSI ( float fCost ) const { return EstimateMTCostSI ( fCost, m_tCtx.m_iThreads ); }
bool IsGeodistFilter ( const CSphFilterSettings & tFilter ) const;
bool IsPoly2dFilter ( const CSphFilterSettings & tFilter, int & iNumPoints ) const;
float CalcGetFilterComplexity ( const SecondaryIndexInfo_t & tSIInfo, const CSphFilterSettings & tFilter ) const;
bool NeedBitmapUnion ( int iNumIterators ) const;
uint32_t CalcNumSIIterators ( const CSphFilterSettings & tFilter, int64_t iDocs ) const;
int64_t ApplyCutoff ( int64_t iDocs ) const;
void SortIndexes();
int GetNumIndexes() const { return m_dSIInfo.GetLength(); }
const SecondaryIndexInfo_t & GetIndex ( int iIndex ) const { return m_dSIInfo[m_dSorted[iIndex]]; }
const CSphFilterSettings & GetFilter ( int iIndex ) const { return m_tCtx.m_dFilters[m_dSorted[iIndex]]; }
bool IsFilterOverExpr ( int iIndex ) const;
};
CostEstimate_c::CostEstimate_c ( const CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx, int iCutoff )
: m_dSIInfo ( dSIInfo )
, m_tCtx ( tCtx )
, m_iCutoff ( iCutoff )
{}
bool CostEstimate_c::NeedBitmapUnion ( int iNumIterators ) const
{
// this needs to be in sync with iterator construction code
const int BITMAP_ITERATOR_THRESH = 8;
return iNumIterators>BITMAP_ITERATOR_THRESH;
}
int64_t CostEstimate_c::ApplyCutoff ( int64_t iDocs ) const
{
if ( m_iCutoff<0 )
return iDocs;
return Min ( iDocs, m_iCutoff );
}
bool CostEstimate_c::IsGeodistFilter ( const CSphFilterSettings & tFilter ) const
{
int iAttr = GetAliasedAttrIndex ( tFilter.m_sAttrName, m_tCtx.m_tQuery, m_tCtx.m_tSorterSchema );
if ( iAttr<0 )
return false;
const CSphColumnInfo & tAttr = m_tCtx.m_tSorterSchema.GetAttr(iAttr);
if ( !tAttr.m_pExpr )
return false;
std::pair<GeoDistSettings_t *, bool> tSettingsPair { nullptr, false };
tAttr.m_pExpr->Command ( SPH_EXPR_GET_GEODIST_SETTINGS, &tSettingsPair );
return tSettingsPair.second;
}
bool CostEstimate_c::IsPoly2dFilter ( const CSphFilterSettings & tFilter, int & iNumPoints ) const
{
int iAttr = GetAliasedAttrIndex ( tFilter.m_sAttrName, m_tCtx.m_tQuery, m_tCtx.m_tSorterSchema );
if ( iAttr<0 )
return false;
const CSphColumnInfo & tAttr = m_tCtx.m_tSorterSchema.GetAttr(iAttr);
if ( !tAttr.m_pExpr )
return false;
std::pair<Poly2dBBox_t *, bool> tSettingsPair { nullptr, false };
tAttr.m_pExpr->Command ( SPH_EXPR_GET_POLY2D_BBOX, &tSettingsPair );
if ( tSettingsPair.second )
{
iNumPoints = tSettingsPair.first->m_iNumPoints;
return true;
}
return false;
}
float CostEstimate_c::CalcGetFilterComplexity ( const SecondaryIndexInfo_t & tSIInfo, const CSphFilterSettings & tFilter ) const
{
if ( IsGeodistFilter(tFilter) )
return 3.0f;
int iNumPoints = 0;
if ( IsPoly2dFilter ( tFilter, iNumPoints ) )
{
const float COMPLEXITY_PER_POINT = 0.007f;
return 1.2f + COMPLEXITY_PER_POINT*iNumPoints;
}
auto pAttr = m_tCtx.m_tIndexSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
if ( !pAttr )
return 1.0f;
ESphAttr eAttrType = pAttr->m_eAttrType;
float fFilterComplexity = 1.0f;
if ( ( eAttrType==SPH_ATTR_UINT32SET || eAttrType==SPH_ATTR_INT64SET ) && tFilter.m_eType==SPH_FILTER_VALUES )
{
float fCoeff = Max ( float( tSIInfo.m_iTotalValues )/m_tCtx.m_iTotalDocs, 2.0f );
fFilterComplexity *= log2f(fCoeff)*tFilter.m_dValues.GetLength();
}
else if ( tFilter.m_eType==SPH_FILTER_STRING || tFilter.m_eType==SPH_FILTER_STRING_LIST )
{
const float STRING_COMPLEXITY = 10.0f;
float fCoeff = Max ( float( tSIInfo.m_iTotalValues )/m_tCtx.m_iTotalDocs, 2.0f );
fFilterComplexity *= STRING_COMPLEXITY*log2f(fCoeff)*tFilter.m_dStrings.GetLength();
}
return fFilterComplexity;
}
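// Worked example: a VALUES filter over an MVA attr with 3 values, where
// m_iTotalValues/m_iTotalDocs = 4, yields complexity 1.0*log2(4)*3 = 6.0;
// a plain scalar filter keeps the 1.0 baseline, and geodist gets a flat 3.0.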
float CostEstimate_c::CalcFilterCost ( const SecondaryIndexInfo_t & tIndex, const CSphFilterSettings & tFilter, bool bFromIterator, bool bFilterOverExpr, float fDocsLeft ) const
{
assert ( tIndex.m_eType==SecondaryIndexType_e::FILTER );
float fFilterComplexity = CalcGetFilterComplexity ( tIndex, tFilter );
if ( bFromIterator || bFilterOverExpr )
{
int64_t iDocsToProcess = int64_t(fDocsLeft*m_tCtx.m_iTotalDocs);
iDocsToProcess = ApplyCutoff ( iDocsToProcess );
return Cost_Filter ( iDocsToProcess, fFilterComplexity );
}
if ( tFilter.m_eType==SPH_FILTER_STRING || tFilter.m_eType==SPH_FILTER_STRING_LIST )
return Cost_Filter ( m_tCtx.m_iTotalDocs, fFilterComplexity );
int64_t iDocsToFilter = int64_t ( (float)ApplyCutoff ( tIndex.m_iRsetEstimate ) * m_tCtx.m_iTotalDocs / ( tIndex.m_iRsetEstimate + 1 ) );
float fCost = Cost_Filter ( iDocsToFilter, fFilterComplexity );
fCost += Cost_BlockFilter ( m_tCtx.m_iTotalDocs, fFilterComplexity );
return fCost;
}
float CostEstimate_c::CalcLookupCost ( const SecondaryIndexInfo_t & tIndex ) const
{
// no cutoff here since lookup reader fetches all docs and sorts them
return Cost_LookupRead ( tIndex.m_iRsetEstimate );
}
float CostEstimate_c::CalcAnalyzerCost ( const SecondaryIndexInfo_t & tIndex, const CSphFilterSettings & tFilter, float fDocsLeft ) const
{
assert ( tIndex.m_eType==SecondaryIndexType_e::ANALYZER );
assert ( m_tCtx.m_pColumnar );
columnar::AttrInfo_t tAttrInfo;
m_tCtx.m_pColumnar->GetAttrInfo ( tFilter.m_sAttrName.cstr(), tAttrInfo );
float fFilterComplexity = CalcGetFilterComplexity ( tIndex, tFilter );
int64_t iDocsBeforeFilter = tIndex.m_iPartialColumnarMinMax==-1 ? m_tCtx.m_iTotalDocs : std::min ( tIndex.m_iPartialColumnarMinMax, m_tCtx.m_iTotalDocs );
// filters that process but reject values are 2x faster
float fAcceptCoeff = std::min ( float(tIndex.m_iRsetEstimate)/iDocsBeforeFilter, 1.0f ) / 2.0f + 0.5f;
float fTotalCoeff = fFilterComplexity*tAttrInfo.m_fComplexity*fAcceptCoeff;
int64_t iDocsToFilter = int64_t ( (float)ApplyCutoff ( tIndex.m_iRsetEstimate ) * m_tCtx.m_iTotalDocs / ( tIndex.m_iRsetEstimate + 1 ) );
const int64_t READ_BLOCK_SIZE = 1024;
iDocsBeforeFilter = Max ( int64_t(iDocsBeforeFilter*fDocsLeft), READ_BLOCK_SIZE );
iDocsToFilter = Max ( int64_t(iDocsToFilter*fDocsLeft), READ_BLOCK_SIZE );
if ( tIndex.m_iPartialColumnarMinMax==-1 ) // no minmax? scan whole index
return Cost_ColumnarFilter ( iDocsToFilter, fTotalCoeff );
// minmax tree eval
const int MINMAX_NODE_SIZE = 1024;
int iMatchingNodes = ( tIndex.m_iRsetEstimate + MINMAX_NODE_SIZE - 1 ) / MINMAX_NODE_SIZE;
int iTreeLevels = sphLog2 ( m_tCtx.m_iTotalDocs );
float fCost = Cost_Filter ( iMatchingNodes*iTreeLevels, fFilterComplexity );
const float MINMAX_RATIO = 0.9f;
if ( (float)tIndex.m_iPartialColumnarMinMax / m_tCtx.m_iTotalDocs >= MINMAX_RATIO )
fCost += Cost_ColumnarFilter ( iDocsToFilter, fTotalCoeff );
else
fCost += Cost_ColumnarFilter ( std::min ( iDocsBeforeFilter, iDocsToFilter ), fTotalCoeff );
return fCost;
}
float CostEstimate_c::CalcIndexCost ( const SecondaryIndexInfo_t & tIndex, const CSphFilterSettings & tFilter, float fDocsLeft ) const
{
assert ( tIndex.m_eType==SecondaryIndexType_e::INDEX );
float fCost = 0.0f;
int64_t iDocs = ApplyCutoff ( tIndex.m_iRsetEstimate );
uint32_t uNumIterators = tIndex.m_uNumSIIterators;
if ( !uNumIterators )
return 0.0f;
const int64_t READ_BLOCK_SIZE = 1024;
int64_t iDocsToRead = Max ( int64_t(iDocs*fDocsLeft), READ_BLOCK_SIZE );
if ( uNumIterators==1 )
fCost += Cost_IndexReadSingle(iDocsToRead);
else
{
if ( NeedBitmapUnion(uNumIterators) )
fCost += Cost_IndexReadBitmap(iDocs); // read all docs (when constructing the bitmap), not only the ones left
else
fCost += Cost_IndexUnionQueue(iDocsToRead);
}
fCost += Cost_IndexIteratorInit(uNumIterators);
return fCost;
}
void CostEstimate_c::SortIndexes()
{
m_dSorted.Resize ( m_dSIInfo.GetLength() );
ARRAY_FOREACH ( i, m_dSorted )
m_dSorted[i] = i;
m_dSorted.Sort ( Lesser ( [this] ( int iA, int iB )
{
if ( m_dSIInfo[iA].m_eType==SecondaryIndexType_e::FILTER && m_dSIInfo[iB].m_eType!=SecondaryIndexType_e::FILTER )
return false;
if ( m_dSIInfo[iA].m_eType!=SecondaryIndexType_e::FILTER && m_dSIInfo[iB].m_eType==SecondaryIndexType_e::FILTER )
return true;
return m_dSIInfo[iA].m_iRsetEstimate < m_dSIInfo[iB].m_iRsetEstimate;
} ) );
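// Ordering example: {FILTER(est=10), INDEX(est=500), ANALYZER(est=100)} sorts
// to {ANALYZER(100), INDEX(500), FILTER(10)}: iterator-backed entries first,
// cheapest result set first, plain filters always last.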
}
bool CostEstimate_c::IsFilterOverExpr ( int iIndex ) const
{
auto & tIndex = GetIndex(iIndex);
if ( tIndex.m_eType!=SecondaryIndexType_e::FILTER )
return false;
auto & tFilter = GetFilter(iIndex);
int iAttr = GetAliasedAttrIndex ( tFilter.m_sAttrName, m_tCtx.m_tQuery, m_tCtx.m_tSorterSchema );
if ( iAttr<0 )
return true;
return !!m_tCtx.m_tSorterSchema.GetAttr(iAttr).m_pExpr;
}
float CostEstimate_c::CalcIteratorIntersectCost ( float fFirstIteratorDocs, int iNumIterators )
{
int64_t iDocs = fFirstIteratorDocs*m_tCtx.m_iTotalDocs;
return COST_ITERATOR_INTERSECT*iDocs*(iNumIterators-1)*SCALE;
}
float CostEstimate_c::CalcPushCost ( float fDocsAfterFilters ) const
{
int64_t iDocsToPush = fDocsAfterFilters*m_tCtx.m_iTotalDocs;
iDocsToPush = ApplyCutoff(iDocsToPush);
if ( HasImplicitGrouping ( m_tCtx.m_tQuery ) )
return Cost_PushImplicitGroupby ( iDocsToPush );
return Cost_Push ( iDocsToPush );
}
float CostEstimate_c::CalcQueryCost()
{
SortIndexes();
int iNumLookups = 0;
int iNumAnalyzers = 0;
int iNumIndexes = 0;
float fFirstIteratorDocs = 0.0f;
bool bFirstDocsAssigned = false;
float fCost = 0.0f;
float fDocsLeft = m_tCtx.m_fDocsLeft;
for ( int i = 0; i < GetNumIndexes(); i++ )
{
const auto & tIndex = GetIndex(i);
const auto & tFilter = GetFilter(i);
float fIndexProbability = float(tIndex.m_iRsetEstimate) / m_tCtx.m_iTotalDocs;
switch ( tIndex.m_eType )
{
case SecondaryIndexType_e::LOOKUP:
fCost += CalcLookupCost(tIndex);
iNumLookups++;
break;
case SecondaryIndexType_e::ANALYZER:
fCost += CalcAnalyzerCost ( tIndex, tFilter, fDocsLeft );
iNumAnalyzers++;
break;
case SecondaryIndexType_e::INDEX:
fCost += CalcIndexCost ( tIndex, tFilter, fDocsLeft );
iNumIndexes++;
break;
case SecondaryIndexType_e::FILTER:
fCost += CalcFilterCost ( tIndex, tFilter, m_tCtx.m_bFromIterator || ( iNumLookups + iNumAnalyzers + iNumIndexes ) >0, IsFilterOverExpr(i), fDocsLeft );
break;
case SecondaryIndexType_e::NONE:
continue;
default:
break;
}
fDocsLeft *= fIndexProbability;
if ( iNumLookups + iNumAnalyzers + iNumIndexes > 0 && !bFirstDocsAssigned )
{
fFirstIteratorDocs = fDocsLeft;
bFirstDocsAssigned = true;
}
}
int iToIntersect = iNumLookups + iNumAnalyzers + iNumIndexes;
if ( iToIntersect > 1 )
fCost += CalcIteratorIntersectCost ( fFirstIteratorDocs, iToIntersect );
if ( m_tCtx.m_bCalcPushCost )
fCost += CalcPushCost(fDocsLeft);
if ( !iNumLookups ) // docid lookups always run in a single thread
fCost = iNumIndexes ? CalcMTCostSI(fCost) : ( iNumAnalyzers ? CalcMTCostCS(fCost) : CalcMTCost(fCost) );
return fCost;
}
/////////////////////////////////////////////////////////////////////
SelectIteratorCtx_t::SelectIteratorCtx_t ( const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, const ISphSchema & tIndexSchema, const ISphSchema & tSorterSchema, const HistogramContainer_c * pHistograms, columnar::Columnar_i * pColumnar, const SIContainer_c & tSI, int iCutoff, int64_t iTotalDocs, int iThreads )
: m_tQuery ( tQuery )
, m_dFilters ( dFilters )
, m_tIndexSchema ( tIndexSchema )
, m_tSorterSchema ( tSorterSchema )
, m_pHistograms ( pHistograms )
, m_pColumnar ( pColumnar )
, m_tSI ( tSI )
, m_iCutoff ( iCutoff )
, m_iTotalDocs ( iTotalDocs )
, m_iThreads ( iThreads )
{}
bool SelectIteratorCtx_t::IsEnabled_SI ( const CSphFilterSettings & tFilter ) const
{
if ( m_tSI.IsEmpty() )
return false;
if ( tFilter.m_eType!=SPH_FILTER_VALUES && tFilter.m_eType!=SPH_FILTER_STRING && tFilter.m_eType!=SPH_FILTER_STRING_LIST && tFilter.m_eType!=SPH_FILTER_RANGE && tFilter.m_eType!=SPH_FILTER_FLOATRANGE && tFilter.m_eType!=SPH_FILTER_NULL )
return false;
// ALL(mva/string) needs to scan the whole row
if ( tFilter.m_eMvaFunc==SPH_MVAFUNC_ALL )
return false;
const CSphColumnInfo * pCol = m_tIndexSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
// FIXME!!! warn in case force index used but index was skipped
if ( pCol && ( pCol->m_eAttrType==SPH_ATTR_STRING && m_tQuery.m_eCollation!=SPH_COLLATION_DEFAULT ) )
return false;
return m_tSI.IsEnabled ( tFilter.m_sAttrName );
}
bool SelectIteratorCtx_t::IsEnabled_Analyzer ( const CSphFilterSettings & tFilter ) const
{
auto pAttr = m_tIndexSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
return pAttr && ( pAttr->IsColumnar() || pAttr->IsColumnarExpr() );
}
/////////////////////////////////////////////////////////////////////
CostEstimate_i * CreateCostEstimate ( const CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx, int iCutoff )
{
return new CostEstimate_c ( dSIInfo, tCtx, iCutoff );
}
float CalcFTIntersectCost ( const NodeEstimate_t & tEst1, const NodeEstimate_t & tEst2, int64_t iTotalDocs, int iDocsPerBlock1, int iDocsPerBlock2 )
{
if ( !tEst1.m_iDocs || !tEst2.m_iDocs )
return 0.0f;
int64_t iCorrectedDocs1 = tEst1.m_iDocs;
int64_t iCorrectedDocs2 = tEst2.m_iDocs;
float fCorrectedCost1 = tEst1.m_fCost;
float fCorrectedCost2 = tEst2.m_fCost;
float fIntersection = float(tEst1.m_iDocs)/iTotalDocs*float(tEst2.m_iDocs)/iTotalDocs;
int64_t iHintCalls = tEst1.m_iDocs * tEst1.m_iTerms / iDocsPerBlock1 + tEst2.m_iDocs * tEst2.m_iTerms / iDocsPerBlock2;
const float THRESH = 0.05f;
if ( fIntersection > THRESH )
{
iCorrectedDocs1 = int64_t(tEst1.m_iDocs*fIntersection);
iCorrectedDocs2 = int64_t(tEst2.m_iDocs*fIntersection);
fCorrectedCost1 *= fIntersection;
fCorrectedCost2 *= fIntersection;
} else
{
// the intersection of the left and right result sets is small
// best case: rowid ranges do not overlap, and one hint call is enough to stop the search
// worst case: rowid ranges fully overlap, and hint calls do nothing
// so we take the middle scenario: hint calls have some effect, but we still have to evaluate half of all docs
// since we are comparing this estimate against the full-text match cost, that is ok
iCorrectedDocs1 /= 2;
fCorrectedCost1 /= 2.0f;
iCorrectedDocs2 /= 2;
fCorrectedCost2 /= 2.0f;
}
const float COST_INTERSECT = 20.0f;
const float COST_HINTCALL = 35.0f;
return fCorrectedCost1 + fCorrectedCost2 + ( COST_INTERSECT*(iCorrectedDocs1+iCorrectedDocs2) + COST_HINTCALL*iHintCalls )*CostEstimate_c::SCALE;
}
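// Worked example: with iTotalDocs=1M, tEst1.m_iDocs=300k and tEst2.m_iDocs=400k,
// fIntersection = 0.3*0.4 = 0.12 > THRESH, so both doc counts and costs are
// scaled by 0.12 before adding the per-doc intersect and hint-call charges;
// below the threshold both sides are simply halved instead.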
| size: 19,603 | language: C++ | extension: .cpp | total_lines: 427 | avg_line_length: 43.540984 | max_line_length: 335 | alphanum_fraction: 0.743711 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicates (all exact/near flags): false |
id: 16,867 | file_name: docstore.cpp | file_path: manticoresoftware_manticoresearch/src/docstore.cpp | content:
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "docstore.h"
#include "std/lrucache.h"
#include "fileio.h"
#include "memio.h"
#include "fileutils.h"
#include "attribute.h"
#include "indexcheck.h"
#include "lz4/lz4.h"
#include "lz4/lz4hc.h"
#include "sphinxint.h"
enum BlockFlags_e : BYTE
{
BLOCK_FLAG_COMPRESSED = 1 << 0,
BLOCK_FLAG_FIELD_REORDER = 1 << 1
};
enum BlockType_e : BYTE
{
BLOCK_TYPE_SMALL,
BLOCK_TYPE_BIG,
BLOCK_TYPE_TOTAL
};
enum DocFlags_e : BYTE
{
DOC_FLAG_ALL_EMPTY = 1 << 0,
DOC_FLAG_EMPTY_BITMASK = 1 << 1
};
enum FieldFlags_e : BYTE
{
FIELD_FLAG_COMPRESSED = 1 << 0,
FIELD_FLAG_EMPTY = 1 << 1
};
static const int STORAGE_VERSION = 1;
//////////////////////////////////////////////////////////////////////////
static BYTE Compression2Byte ( Compression_e eComp )
{
switch (eComp)
{
case Compression_e::NONE: return 0;
case Compression_e::LZ4: return 1;
case Compression_e::LZ4HC: return 2;
default:
assert ( 0 && "Unknown compression type" );
return 0;
}
}
static Compression_e Byte2Compression ( BYTE uComp )
{
switch (uComp)
{
case 0: return Compression_e::NONE;
case 1: return Compression_e::LZ4;
case 2: return Compression_e::LZ4HC;
default:
assert ( 0 && "Unknown compression type" );
return Compression_e::NONE;
}
}
static void PackData ( CSphVector<BYTE> & dDst, const BYTE * pData, DWORD uSize, bool bText, bool bPack )
{
if ( bPack )
{
const DWORD GAP = 8;
dDst.Resize ( uSize+GAP );
dDst.Resize ( sphPackPtrAttr ( dDst.Begin (), {pData, uSize} ));
}
else
{
dDst.Resize ( uSize + ( bText ? 1 : 0 ) );
memcpy ( dDst.Begin(), pData, uSize );
if ( bText )
{
dDst[uSize] = '\0';
dDst.Resize(uSize);
}
}
}
//////////////////////////////////////////////////////////////////////////
class Compressor_i
{
public:
virtual ~Compressor_i(){}
virtual bool Compress ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const = 0;
virtual bool Decompress ( const VecTraits_T<BYTE> & dCompressed, VecTraits_T<BYTE> & dDecompressed ) const = 0;
};
class Compressor_None_c : public Compressor_i
{
public:
bool Compress ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const final { return false; }
bool Decompress ( const VecTraits_T<BYTE> & dCompressed, VecTraits_T<BYTE> & dDecompressed ) const final { return true; }
};
class Compressor_LZ4_c : public Compressor_i
{
public:
bool Compress ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const override;
bool Decompress ( const VecTraits_T<BYTE> & dCompressed, VecTraits_T<BYTE> & dDecompressed ) const final;
protected:
virtual int DoCompression ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const;
};
class Compressor_LZ4HC_c : public Compressor_LZ4_c
{
public:
Compressor_LZ4HC_c ( int iCompressionLevel );
protected:
int DoCompression ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const final;
private:
int m_iCompressionLevel = DEFAULT_COMPRESSION_LEVEL;
};
bool Compressor_LZ4_c::Compress ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const
{
const int MIN_COMPRESSIBLE_SIZE = 64;
if ( dUncompressed.GetLength() < MIN_COMPRESSIBLE_SIZE )
return false;
dCompressed.Resize ( int ( dUncompressed.GetLength()*1.5f ) );
int iCompressedSize = DoCompression ( dUncompressed, dCompressed );
const float WORST_COMPRESSION_RATIO = 0.95f;
if ( iCompressedSize<0 || float(iCompressedSize)/dUncompressed.GetLength() > WORST_COMPRESSION_RATIO )
return false;
dCompressed.Resize(iCompressedSize);
return true;
}
bool Compressor_LZ4_c::Decompress ( const VecTraits_T<BYTE> & dCompressed, VecTraits_T<BYTE> & dDecompressed ) const
{
int iRes = LZ4_decompress_safe ( (const char *)dCompressed.Begin(), (char *)dDecompressed.Begin(), dCompressed.GetLength(), dDecompressed.GetLength() );
return iRes==dDecompressed.GetLength();
}
int Compressor_LZ4_c::DoCompression ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const
{
return LZ4_compress_default ( (const char *)dUncompressed.Begin(), (char *)dCompressed.Begin(), dUncompressed.GetLength(), dCompressed.GetLength() );
}
Compressor_LZ4HC_c::Compressor_LZ4HC_c ( int iCompressionLevel )
: m_iCompressionLevel ( iCompressionLevel )
{}
int Compressor_LZ4HC_c::DoCompression ( const VecTraits_T<BYTE> & dUncompressed, CSphVector<BYTE> & dCompressed ) const
{
return LZ4_compress_HC ( (const char *)dUncompressed.Begin(), (char *)dCompressed.Begin(), dUncompressed.GetLength(), dCompressed.GetLength(), m_iCompressionLevel );
}
std::unique_ptr<Compressor_i> CreateCompressor ( Compression_e eComp, int iCompressionLevel )
{
switch ( eComp )
{
case Compression_e::LZ4: return std::make_unique<Compressor_LZ4_c>();
case Compression_e::LZ4HC: return std::make_unique<Compressor_LZ4HC_c> ( iCompressionLevel );
default: return std::make_unique<Compressor_None_c>();
}
}
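// Illustrative round trip through the interface above (sketch only, not part
// of the original source). Compress() returns false for blocks that are too
// small or incompressible, in which case the caller stores the raw bytes and
// must supply the original length to Decompress() by other means.
#if 0 // sketch only
static bool CompressorRoundTrip ( const CSphVector<BYTE> & dSrc )
{
std::unique_ptr<Compressor_i> pComp = CreateCompressor ( Compression_e::LZ4, DEFAULT_COMPRESSION_LEVEL );
CSphVector<BYTE> dPacked;
if ( !pComp->Compress ( dSrc, dPacked ) )
return true; // stored uncompressed; nothing to verify
CSphVector<BYTE> dUnpacked;
dUnpacked.Resize ( dSrc.GetLength() ); // decompressor needs the original size
return pComp->Decompress ( dPacked, dUnpacked ) && !memcmp ( dUnpacked.Begin(), dSrc.Begin(), dSrc.GetLength() );
}
#endif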
//////////////////////////////////////////////////////////////////////////
static CSphString BuildCompoundName ( const CSphString & sName, DocstoreDataType_e eType )
{
CSphString sCompound;
sCompound.SetSprintf ( "%d%s", eType, sName.cstr() );
return sCompound;
}
class DocstoreFields_c : public DocstoreFields_i
{
public:
struct Field_t
{
CSphString m_sName;
DocstoreDataType_e m_eType;
};
int AddField ( const CSphString & sName, DocstoreDataType_e eType ) final;
void RemoveField ( const CSphString & sName, DocstoreDataType_e eType ) final;
int GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const final;
int GetNumFields() const final { return m_dFields.GetLength(); }
const Field_t & GetField ( int iField ) const { return m_dFields[iField]; }
void Load ( CSphReader & tReader );
void Save ( CSphWriter & tWriter );
private:
CSphVector<Field_t> m_dFields;
SmallStringHash_T<int> m_hFields;
};
int DocstoreFields_c::AddField ( const CSphString & sName, DocstoreDataType_e eType )
{
int iField = m_dFields.GetLength();
m_dFields.Add ( {sName, eType} );
m_hFields.Add ( iField, BuildCompoundName ( sName, eType ) );
return iField;
}
void DocstoreFields_c::RemoveField ( const CSphString & sName, DocstoreDataType_e eType )
{
int iFieldId = GetFieldId ( sName, eType );
if ( iFieldId==-1 )
return;
m_dFields.Remove ( iFieldId, 1 );
m_hFields.Reset();
ARRAY_FOREACH ( i, m_dFields )
m_hFields.Add ( i, BuildCompoundName ( m_dFields[i].m_sName, m_dFields[i].m_eType ) );
}
int DocstoreFields_c::GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const
{
int * pField = m_hFields ( BuildCompoundName ( sName, eType ) );
return pField ? *pField : -1;
}
void DocstoreFields_c::Load ( CSphReader & tReader )
{
assert ( !GetNumFields() );
DWORD uNumFields = tReader.GetDword();
for ( int i = 0; i < (int)uNumFields; i++ )
{
auto eType = (DocstoreDataType_e)tReader.GetByte();
CSphString sName = tReader.GetString();
AddField ( sName, eType );
}
}
void DocstoreFields_c::Save ( CSphWriter & tWriter )
{
tWriter.PutDword ( GetNumFields() );
for ( int i = 0, iNumFields = GetNumFields(); i < iNumFields; ++i )
{
tWriter.PutByte ( GetField(i).m_eType );
tWriter.PutString ( GetField(i).m_sName );
}
}
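// Example: AddField("title", DOCSTORE_TEXT) hands out ids in insertion order
// (0 for the first field), and GetFieldId("title", DOCSTORE_TEXT) finds it
// again via the "<type><name>" compound key built by BuildCompoundName().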
//////////////////////////////////////////////////////////////////////////
struct BlockData_t
{
BYTE m_uFlags = 0;
DWORD m_uNumDocs = 0;
BYTE * m_pData = nullptr;
DWORD m_uSize = 0;
};
struct HashKey_t
{
int64_t m_iIndexId;
SphOffset_t m_tOffset;
bool operator == ( const HashKey_t & tKey ) const { return m_iIndexId==tKey.m_iIndexId && m_tOffset==tKey.m_tOffset; }
};
struct BlockUtil_t
{
static DWORD GetHash ( const HashKey_t & tKey )
{
DWORD uCRC32 = sphCRC32 ( &tKey.m_iIndexId, sizeof(tKey.m_iIndexId) );
return sphCRC32 ( &tKey.m_tOffset, sizeof(tKey.m_tOffset), uCRC32 );
}
static DWORD GetSize ( const BlockData_t & tValue ) { return tValue.m_uSize; }
static void Reset ( BlockData_t & tValue ) { SafeDeleteArray ( tValue.m_pData ); }
};
class BlockCache_c : public LRUCache_T<HashKey_t, BlockData_t, BlockUtil_t>
{
using BASE = LRUCache_T<HashKey_t, BlockData_t, BlockUtil_t>;
using BASE::BASE;
public:
void DeleteAll ( int64_t iIndexId ) { BASE::Delete ( [iIndexId]( const HashKey_t & tKey ){ return tKey.m_iIndexId==iIndexId; } ); }
static void Init ( int64_t iCacheSize );
static void Done() { SafeDelete(m_pBlockCache); }
static BlockCache_c * Get() { return m_pBlockCache; }
private:
static BlockCache_c * m_pBlockCache;
};
BlockCache_c * BlockCache_c::m_pBlockCache = nullptr;
void BlockCache_c::Init ( int64_t iCacheSize )
{
assert ( !m_pBlockCache );
if ( iCacheSize > 0 )
m_pBlockCache = new BlockCache_c(iCacheSize);
}
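// Illustrative wiring (the real init/shutdown calls live elsewhere in the
// daemon; placement here is an assumption):
// BlockCache_c::Init ( iDocstoreCacheSize ); // once at startup; size<=0 disables the cache
// ... serve queries; Get() returns nullptr while the cache is disabled ...
// BlockCache_c::Done(); // once at shutdown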
//////////////////////////////////////////////////////////////////////////
class DocstoreReaders_c
{
public:
~DocstoreReaders_c();
void CreateReader ( int64_t iSessionId, int64_t iIndexId, const CSphAutofile & tFile, DWORD uBlockSize );
CSphReader * GetReader ( int64_t iSessionId, int64_t iIndexId );
void DeleteBySessionId ( int64_t iSessionId );
void DeleteByIndexId ( int64_t iIndexId );
static void Init();
static void Done();
static DocstoreReaders_c * Get();
private:
struct HashKey_t
{
int64_t m_iSessionId;
int64_t m_iIndexId;
bool operator == ( const HashKey_t & tKey ) const;
static DWORD Hash ( const HashKey_t & tKey );
};
int m_iTotalReaderSize = 0;
CSphMutex m_tLock;
CSphOrderedHash<CSphReader *, HashKey_t, HashKey_t, 1024> m_tHash;
static DocstoreReaders_c * m_pReaders;
static const int MIN_READER_CACHE_SIZE = 262144;
static const int MAX_READER_CACHE_SIZE = 1048576;
static const int MAX_TOTAL_READER_SIZE = 8388608;
void Delete ( CSphReader * pReader, const HashKey_t tKey );
};
DocstoreReaders_c * DocstoreReaders_c::m_pReaders = nullptr;
bool DocstoreReaders_c::HashKey_t::operator == ( const HashKey_t & tKey ) const
{
return m_iSessionId==tKey.m_iSessionId && m_iIndexId==tKey.m_iIndexId;
}
DWORD DocstoreReaders_c::HashKey_t::Hash ( const HashKey_t & tKey )
{
DWORD uCRC32 = sphCRC32 ( &tKey.m_iSessionId, sizeof(tKey.m_iSessionId) );
return sphCRC32 ( &tKey.m_iIndexId, sizeof(tKey.m_iIndexId), uCRC32 );
}
DocstoreReaders_c::~DocstoreReaders_c()
{
for ( auto & tDocstore : m_tHash )
SafeDelete ( tDocstore.second );
}
void DocstoreReaders_c::CreateReader ( int64_t iSessionId, int64_t iIndexId, const CSphAutofile & tFile, DWORD uBlockSize )
{
ScopedMutex_t tLock(m_tLock);
if ( m_tHash ( { iSessionId, iIndexId } ) )
return;
int iBufferSize = (int)uBlockSize*8;
iBufferSize = Min ( iBufferSize, MAX_READER_CACHE_SIZE );
iBufferSize = Max ( iBufferSize, MIN_READER_CACHE_SIZE );
if ( iBufferSize<=(int)uBlockSize )
return;
if ( m_iTotalReaderSize+iBufferSize > MAX_TOTAL_READER_SIZE )
return;
CSphReader * pReader = new CSphReader ( nullptr, iBufferSize );
pReader->SetFile(tFile);
Verify ( m_tHash.Add ( pReader, {iSessionId, iIndexId} ) );
m_iTotalReaderSize += iBufferSize;
}
CSphReader * DocstoreReaders_c::GetReader ( int64_t iSessionId, int64_t iIndexId )
{
ScopedMutex_t tLock(m_tLock);
CSphReader ** ppReader = m_tHash ( { iSessionId, iIndexId } );
return ppReader ? *ppReader : nullptr;
}
void DocstoreReaders_c::Delete ( CSphReader * pReader, const HashKey_t tKey )
{
m_iTotalReaderSize -= pReader->GetBufferSize();
assert ( m_iTotalReaderSize>=0 );
SafeDelete(pReader);
m_tHash.Delete(tKey);
}
void DocstoreReaders_c::DeleteBySessionId ( int64_t iSessionId )
{
ScopedMutex_t tLock(m_tLock);
// fixme: create a separate (faster) lookup?
CSphVector<std::pair<CSphReader*,HashKey_t>> dToDelete;
for ( auto & tDocstore : m_tHash )
if ( tDocstore.first.m_iSessionId==iSessionId )
dToDelete.Add ( { tDocstore.second, tDocstore.first } );
for ( const auto & i : dToDelete )
Delete ( i.first, i.second );
}
void DocstoreReaders_c::DeleteByIndexId ( int64_t iIndexId )
{
ScopedMutex_t tLock(m_tLock);
// fixme: create a separate (faster) lookup?
CSphVector<std::pair<CSphReader*,HashKey_t>> dToDelete;
for ( auto& tDocstore : m_tHash )
if ( tDocstore.first.m_iIndexId==iIndexId )
dToDelete.Add ( { tDocstore.second, tDocstore.first } );
for ( const auto & i : dToDelete )
Delete ( i.first, i.second );
}
void DocstoreReaders_c::Init ()
{
assert(!m_pReaders);
m_pReaders = new DocstoreReaders_c;
}
void DocstoreReaders_c::Done()
{
SafeDelete(m_pReaders);
}
DocstoreReaders_c * DocstoreReaders_c::Get()
{
return m_pReaders;
}
//////////////////////////////////////////////////////////////////////////
static void CreateFieldRemap ( VecTraits_T<int> & dFieldInRset, const VecTraits_T<int> * pFieldIds )
{
if ( pFieldIds )
ARRAY_CONSTFOREACH ( i, dFieldInRset )
{
int * pFound = pFieldIds->BinarySearch(i);
dFieldInRset[i] = pFound ? pFieldIds->Idx ( pFound ) : -1;
}
else
ARRAY_CONSTFOREACH ( i, dFieldInRset )
dFieldInRset[i] = i;
}
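// Example: with pFieldIds = {1,3} over a 4-field schema, the remap yields
// dFieldInRset = {-1, 0, -1, 1}: field 1 is the 0th fetched field, field 3
// the 1st, and fields 0 and 2 are absent from the result set.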
//////////////////////////////////////////////////////////////////////////
class Docstore_c : public Docstore_i, public DocstoreSettings_t
{
friend class DocstoreChecker_c;
public:
Docstore_c ( int64_t iIndexId, const CSphString & sFilename );
~Docstore_c() override;
bool Init ( CSphString & sError );
int GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const final;
void CreateReader ( int64_t iSessionId ) const final;
DocstoreDoc_t GetDoc ( RowID_t tRowID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const final;
DocstoreSettings_t GetDocstoreSettings() const final;
private:
struct Block_t
{
SphOffset_t m_tOffset = 0;
DWORD m_uSize = 0;
DWORD m_uHeaderSize = 0;
RowID_t m_tRowID = INVALID_ROWID;
BlockType_e m_eType = BLOCK_TYPE_SMALL;
};
struct FieldInfo_t
{
BYTE m_uFlags = 0;
DWORD m_uCompressedLen = 0;
DWORD m_uUncompressedLen = 0;
};
int64_t m_iIndexId = 0;
CSphString m_sFilename;
CSphAutofile m_tFile;
CSphFixedVector<Block_t> m_dBlocks{0};
std::unique_ptr<Compressor_i> m_pCompressor;
DocstoreFields_c m_tFields;
const Block_t * FindBlock ( RowID_t tRowID ) const;
void ReadFromFile ( BYTE * pData, int iLength, SphOffset_t tOffset, int64_t iSessionId ) const;
DocstoreDoc_t ReadDocFromSmallBlock ( const Block_t & tBlock, RowID_t tRowID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const;
DocstoreDoc_t ReadDocFromBigBlock ( const Block_t & tBlock, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const;
BlockData_t UncompressSmallBlock ( const Block_t & tBlock, int64_t iSessionId ) const;
BlockData_t UncompressBigBlockField ( SphOffset_t tOffset, const FieldInfo_t & tInfo, int64_t iSessionId ) const;
bool ProcessSmallBlockDoc ( RowID_t tCurDocRowID, RowID_t tRowID, const VecTraits_T<int> * pFieldIds, const CSphFixedVector<int> & dFieldInRset, bool bPack, MemoryReader2_c & tReader, CSphBitvec & tEmptyFields, DocstoreDoc_t & tResult ) const;
void ProcessBigBlockField ( int iField, const FieldInfo_t & tInfo, int iFieldInRset, bool bPack, int64_t iSessionId, SphOffset_t & tOffset, DocstoreDoc_t & tResult ) const;
};
Docstore_c::Docstore_c ( int64_t iIndexId, const CSphString & sFilename )
: m_iIndexId ( iIndexId )
, m_sFilename ( sFilename )
{}
Docstore_c::~Docstore_c ()
{
BlockCache_c * pBlockCache = BlockCache_c::Get();
if ( pBlockCache )
pBlockCache->DeleteAll(m_iIndexId);
DocstoreReaders_c * pReaders = DocstoreReaders_c::Get();
if ( pReaders )
pReaders->DeleteByIndexId(m_iIndexId);
}
bool Docstore_c::Init ( CSphString & sError )
{
CSphAutoreader tReader;
if ( !tReader.Open ( m_sFilename, sError ) )
return false;
DWORD uStorageVersion = tReader.GetDword();
if ( uStorageVersion > STORAGE_VERSION )
{
sError.SetSprintf ( "Unable to load docstore: %s is v.%d, binary is v.%d", m_sFilename.cstr(), uStorageVersion, STORAGE_VERSION );
return false;
}
m_uBlockSize = tReader.GetDword();
m_eCompression = Byte2Compression ( tReader.GetByte() );
m_pCompressor = CreateCompressor ( m_eCompression, m_iCompressionLevel );
if ( !m_pCompressor )
return false;
m_tFields.Load(tReader);
DWORD uNumBlocks = tReader.GetDword();
if ( !uNumBlocks )
return true;
SphOffset_t tHeaderOffset = tReader.GetOffset();
tReader.SeekTo ( tHeaderOffset, 0 );
m_dBlocks.Reset(uNumBlocks);
DWORD tPrevBlockRowID = 0;
SphOffset_t tPrevBlockOffset = 0;
for ( auto & i : m_dBlocks )
{
i.m_tRowID = tReader.UnzipRowid() + tPrevBlockRowID;
i.m_eType = (BlockType_e)tReader.GetByte();
i.m_tOffset = tReader.UnzipOffset() + tPrevBlockOffset;
if ( i.m_eType==BLOCK_TYPE_BIG )
i.m_uHeaderSize = tReader.UnzipInt();
tPrevBlockRowID = i.m_tRowID;
tPrevBlockOffset = i.m_tOffset;
}
for ( int i = 1; i<m_dBlocks.GetLength(); i++ )
m_dBlocks[i-1].m_uSize = m_dBlocks[i].m_tOffset-m_dBlocks[i-1].m_tOffset;
m_dBlocks.Last().m_uSize = tHeaderOffset-m_dBlocks.Last().m_tOffset;
if ( tReader.GetErrorFlag() )
return false;
tReader.Close();
if ( m_tFile.Open ( m_sFilename, SPH_O_READ, sError ) < 0 )
return false;
return true;
}
const Docstore_c::Block_t * Docstore_c::FindBlock ( RowID_t tRowID ) const
{
const Block_t * pFound = sphBinarySearchFirst ( m_dBlocks.Begin(), m_dBlocks.End()-1, bind(&Block_t::m_tRowID), tRowID );
assert(pFound);
if ( pFound->m_tRowID>tRowID )
{
if ( pFound==m_dBlocks.Begin() )
return nullptr;
return pFound-1;
}
return pFound;
}
void Docstore_c::CreateReader ( int64_t iSessionId ) const
{
DocstoreReaders_c * pReaders = DocstoreReaders_c::Get();
if ( pReaders )
pReaders->CreateReader ( iSessionId, m_iIndexId, m_tFile, m_uBlockSize );
}
int Docstore_c::GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const
{
return m_tFields.GetFieldId (sName, eType );
}
DocstoreDoc_t Docstore_c::GetDoc ( RowID_t tRowID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const
{
#ifndef NDEBUG
// assume that field ids are sorted
for ( int i = 1; pFieldIds && i < pFieldIds->GetLength(); ++i )
assert ( (*pFieldIds)[i-1] < (*pFieldIds)[i] );
#endif
const Block_t * pBlock = FindBlock(tRowID);
assert ( pBlock );
if ( pBlock->m_eType==BLOCK_TYPE_SMALL )
return ReadDocFromSmallBlock ( *pBlock, tRowID, pFieldIds, iSessionId, bPack );
else
return ReadDocFromBigBlock ( *pBlock, pFieldIds, iSessionId, bPack );
}
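// RAII guard for a block pinned in the block cache: releases the cache entry
// on scope exit. A default-constructed guard (m_iIndexId==INT64_MAX) is a no-op.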
struct ScopedBlock_t
{
int64_t m_iIndexId = INT64_MAX;
SphOffset_t m_tOffset = 0;
~ScopedBlock_t()
{
if ( m_iIndexId==INT64_MAX )
return;
BlockCache_c * pBlockCache = BlockCache_c::Get();
assert ( pBlockCache );
pBlockCache->Release ( { m_iIndexId, m_tOffset } );
}
};
void Docstore_c::ReadFromFile ( BYTE * pData, int iLength, SphOffset_t tOffset, int64_t iSessionId ) const
{
DocstoreReaders_c * pReaders = DocstoreReaders_c::Get();
CSphReader * pReader = nullptr;
if ( pReaders )
pReader = pReaders->GetReader ( iSessionId, m_iIndexId );
if ( pReader )
{
pReader->SeekTo ( tOffset, iLength );
pReader->GetBytes ( pData, iLength );
}
else
sphPread ( m_tFile.GetFD(), pData, iLength, tOffset );
}
BlockData_t Docstore_c::UncompressSmallBlock ( const Block_t & tBlock, int64_t iSessionId ) const
{
BlockData_t tResult;
CSphFixedVector<BYTE> dBlock ( tBlock.m_uSize );
ReadFromFile ( dBlock.Begin(), dBlock.GetLength(), tBlock.m_tOffset, iSessionId );
MemoryReader2_c tBlockReader ( dBlock.Begin(), dBlock.GetLength() );
tResult.m_uFlags = tBlockReader.GetVal<BYTE>();
tResult.m_uNumDocs = tBlockReader.UnzipInt();
tResult.m_uSize = tBlockReader.UnzipInt();
DWORD uCompressedLength = tResult.m_uSize;
bool bCompressed = tResult.m_uFlags & BLOCK_FLAG_COMPRESSED;
if ( bCompressed )
uCompressedLength = tBlockReader.UnzipInt();
const BYTE * pBody = dBlock.Begin() + tBlockReader.GetPos();
CSphFixedVector<BYTE> dDecompressed(0);
if ( bCompressed )
{
dDecompressed.Reset ( tResult.m_uSize );
Verify ( m_pCompressor->Decompress ( VecTraits_T<const BYTE> (pBody, uCompressedLength), dDecompressed) );
tResult.m_pData = dDecompressed.LeakData();
}
else
{
// pBody points into the middle of dBlock, so copy it into a standalone allocation that m_pData can own and later free
tResult.m_pData = new BYTE[tResult.m_uSize];
memcpy ( tResult.m_pData, pBody, tResult.m_uSize );
}
return tResult;
}
bool Docstore_c::ProcessSmallBlockDoc ( RowID_t tCurDocRowID, RowID_t tRowID, const VecTraits_T<int> * pFieldIds, const CSphFixedVector<int> & dFieldInRset, bool bPack, MemoryReader2_c & tReader, CSphBitvec & tEmptyFields, DocstoreDoc_t & tResult ) const
{
bool bDocFound = tCurDocRowID==tRowID;
if ( bDocFound )
tResult.m_dFields.Resize ( pFieldIds ? pFieldIds->GetLength() : m_tFields.GetNumFields() );
DWORD uBitMaskSize = tEmptyFields.GetSizeBytes();
BYTE uDocFlags = tReader.GetVal<BYTE>();
if ( uDocFlags & DOC_FLAG_ALL_EMPTY )
{
for ( auto & i : tResult.m_dFields )
i.Resize(0);
return bDocFound;
}
bool bHasBitmask = !!(uDocFlags & DOC_FLAG_EMPTY_BITMASK);
if ( bHasBitmask )
{
memcpy ( tEmptyFields.Begin(), tReader.Begin()+tReader.GetPos(), uBitMaskSize );
tReader.SetPos ( tReader.GetPos()+uBitMaskSize );
}
for ( int iField = 0; iField < m_tFields.GetNumFields(); iField++ )
if ( !bHasBitmask || !tEmptyFields.BitGet(iField) )
{
DWORD uFieldLength = tReader.UnzipInt();
int iFieldInRset = dFieldInRset[iField];
if ( bDocFound && iFieldInRset!=-1 )
PackData ( tResult.m_dFields[iFieldInRset], tReader.Begin()+tReader.GetPos(), uFieldLength, m_tFields.GetField(iField).m_eType==DOCSTORE_TEXT, bPack );
tReader.SetPos ( tReader.GetPos()+uFieldLength );
}
return bDocFound;
}
DocstoreDoc_t Docstore_c::ReadDocFromSmallBlock ( const Block_t & tBlock, RowID_t tRowID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const
{
BlockCache_c * pBlockCache = BlockCache_c::Get();
BlockData_t tBlockData;
bool bFromCache = pBlockCache && pBlockCache->Find ( { m_iIndexId, tBlock.m_tOffset }, tBlockData );
if ( !bFromCache )
{
tBlockData = UncompressSmallBlock ( tBlock, iSessionId );
bFromCache = pBlockCache && pBlockCache->Add ( { m_iIndexId, tBlock.m_tOffset }, tBlockData );
}
ScopedBlock_t tScopedBlock;
CSphFixedVector<BYTE> tDataPtr {0}; // scoped array ptr
if ( bFromCache )
{
tScopedBlock.m_iIndexId = m_iIndexId;
tScopedBlock.m_tOffset = tBlock.m_tOffset;
}
else
tDataPtr.Set ( tBlockData.m_pData, 0 );
CSphFixedVector<int> dFieldInRset ( m_tFields.GetNumFields() );
CreateFieldRemap ( dFieldInRset, pFieldIds );
DocstoreDoc_t tResult;
RowID_t tCurDocRowID = tBlock.m_tRowID;
MemoryReader2_c tReader ( tBlockData.m_pData, tBlockData.m_uSize );
CSphBitvec tEmptyFields ( m_tFields.GetNumFields() );
for ( int i = 0; i < (int)tBlockData.m_uNumDocs; i++ )
{
if ( ProcessSmallBlockDoc ( tCurDocRowID, tRowID, pFieldIds, dFieldInRset, bPack, tReader, tEmptyFields, tResult ) )
break;
tCurDocRowID++;
}
return tResult;
}
BlockData_t Docstore_c::UncompressBigBlockField ( SphOffset_t tOffset, const FieldInfo_t & tInfo, int64_t iSessionId ) const
{
BlockData_t tResult;
bool bCompressed = !!( tInfo.m_uFlags & FIELD_FLAG_COMPRESSED );
DWORD uDataLen = bCompressed ? tInfo.m_uCompressedLen : tInfo.m_uUncompressedLen;
CSphFixedVector<BYTE> dField ( uDataLen );
ReadFromFile ( dField.Begin(), dField.GetLength(), tOffset, iSessionId );
tResult.m_uSize = tInfo.m_uUncompressedLen;
CSphFixedVector<BYTE> dDecompressed(0);
if ( bCompressed )
{
dDecompressed.Reset ( tResult.m_uSize );
Verify ( m_pCompressor->Decompress ( dField, dDecompressed ) );
tResult.m_pData = dDecompressed.LeakData();
}
else
tResult.m_pData = dField.LeakData();
return tResult;
}
void Docstore_c::ProcessBigBlockField ( int iField, const FieldInfo_t & tInfo, int iFieldInRset, bool bPack, int64_t iSessionId, SphOffset_t & tOffset, DocstoreDoc_t & tResult ) const
{
if ( tInfo.m_uFlags & FIELD_FLAG_EMPTY )
return;
bool bCompressed = !!( tInfo.m_uFlags & FIELD_FLAG_COMPRESSED );
SphOffset_t tOffsetDelta = bCompressed ? tInfo.m_uCompressedLen : tInfo.m_uUncompressedLen;
if ( iFieldInRset==-1 )
{
tOffset += tOffsetDelta;
return;
}
BlockCache_c * pBlockCache = BlockCache_c::Get();
BlockData_t tBlockData;
bool bFromCache = pBlockCache && pBlockCache->Find ( { m_iIndexId, tOffset }, tBlockData );
if ( !bFromCache )
{
tBlockData = UncompressBigBlockField ( tOffset, tInfo, iSessionId );
bFromCache = pBlockCache && pBlockCache->Add ( { m_iIndexId, tOffset }, tBlockData );
}
ScopedBlock_t tScopedBlock;
CSphFixedVector<BYTE> tDataPtr {0}; // scoped array ptr
if ( bFromCache )
{
tScopedBlock.m_iIndexId = m_iIndexId;
tScopedBlock.m_tOffset = tOffset;
}
else
tDataPtr.Set ( tBlockData.m_pData, 0 );
PackData ( tResult.m_dFields[iFieldInRset], tBlockData.m_pData, tBlockData.m_uSize, m_tFields.GetField(iField).m_eType==DOCSTORE_TEXT, bPack );
tOffset += tOffsetDelta;
}
DocstoreDoc_t Docstore_c::ReadDocFromBigBlock ( const Block_t & tBlock, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const
{
CSphFixedVector<FieldInfo_t> dFieldInfo ( m_tFields.GetNumFields() );
CSphFixedVector<BYTE> dBlockHeader(tBlock.m_uHeaderSize);
ReadFromFile ( dBlockHeader.Begin(), dBlockHeader.GetLength(), tBlock.m_tOffset, iSessionId );
MemoryReader2_c tReader ( dBlockHeader.Begin(), dBlockHeader.GetLength() );
CSphVector<int> dFieldSort;
BYTE uBlockFlags = tReader.GetVal<BYTE>();
bool bNeedReorder = !!( uBlockFlags & BLOCK_FLAG_FIELD_REORDER );
if ( bNeedReorder )
{
dFieldSort.Resize ( m_tFields.GetNumFields() );
for ( auto & i : dFieldSort )
i = tReader.UnzipInt();
}
for ( int i = 0; i < m_tFields.GetNumFields(); i++ )
{
int iField = bNeedReorder ? dFieldSort[i] : i;
FieldInfo_t & tInfo = dFieldInfo[iField];
tInfo.m_uFlags = tReader.GetVal<BYTE>();
if ( tInfo.m_uFlags & FIELD_FLAG_EMPTY )
continue;
tInfo.m_uUncompressedLen = tReader.UnzipInt();
if ( tInfo.m_uFlags & FIELD_FLAG_COMPRESSED )
tInfo.m_uCompressedLen = tReader.UnzipInt();
}
dBlockHeader.Reset(0);
CSphFixedVector<int> dFieldInRset ( m_tFields.GetNumFields() );
CreateFieldRemap ( dFieldInRset, pFieldIds );
DocstoreDoc_t tResult;
tResult.m_dFields.Resize ( pFieldIds ? pFieldIds->GetLength() : m_tFields.GetNumFields() );
SphOffset_t tOffset = tBlock.m_tOffset+tBlock.m_uHeaderSize;
// i == physical field order in file
// dFieldSort[i] == field order as in m_dFields
// dFieldInRset[iField] == field order in result set
for ( int i = 0; i < m_tFields.GetNumFields(); i++ )
{
int iField = bNeedReorder ? dFieldSort[i] : i;
ProcessBigBlockField ( iField, dFieldInfo[iField], dFieldInRset[iField], bPack, iSessionId, tOffset, tResult );
}
return tResult;
}
DocstoreSettings_t Docstore_c::GetDocstoreSettings() const
{
return *this;
}
//////////////////////////////////////////////////////////////////////////
DocstoreBuilder_i::Doc_t::Doc_t()
{}
DocstoreBuilder_i::Doc_t::Doc_t ( const DocstoreDoc_t & tDoc )
{
m_dFields.Resize ( tDoc.m_dFields.GetLength() );
ARRAY_FOREACH ( i, m_dFields )
m_dFields[i] = VecTraits_T<BYTE> ( tDoc.m_dFields[i].Begin(), tDoc.m_dFields[i].GetLength() );
}
//////////////////////////////////////////////////////////////////////////
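// Accumulates documents until their total size exceeds m_uBlockSize, then
// flushes them as one block. A run of several docs becomes a "small" block
// (compressed as a whole); a single oversized doc becomes a "big" block with
// per-field compression and optional field reordering by stored size.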
class DocstoreBuilder_c : public DocstoreBuilder_i, public DocstoreSettings_t
{
public:
DocstoreBuilder_c ( CSphString sFilename, const DocstoreSettings_t & tSettings, int iBufferSize );
bool Init ( CSphString & sError );
void AddDoc ( RowID_t tRowID, const Doc_t & tDoc ) final;
int AddField ( const CSphString & sName, DocstoreDataType_e eType ) final { return m_tFields.AddField ( sName, eType ); }
void RemoveField ( const CSphString & sName, DocstoreDataType_e eType ) final { m_tFields.RemoveField ( sName, eType ); }
int GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const final { return m_tFields.GetFieldId ( sName, eType ); }
void Finalize() final;
private:
struct StoredDoc_t
{
RowID_t m_tRowID;
CSphVector<CSphVector<BYTE>> m_dFields;
};
CSphString m_sFilename;
CSphVector<StoredDoc_t> m_dStoredDocs;
CSphVector<BYTE> m_dHeader;
CSphVector<BYTE> m_dBuffer;
std::unique_ptr<Compressor_i> m_pCompressor;
MemoryWriter2_c m_tHeaderWriter;
CSphWriter m_tWriter;
DocstoreFields_c m_tFields;
int m_iBufferSize = 0;
DWORD m_uStoredLen = 0;
int m_iNumBlocks = 0;
SphOffset_t m_tHeaderOffset = 0;
SphOffset_t m_tPrevBlockOffset = 0;
DWORD m_tPrevBlockRowID = 0;
using SortedField_t = std::pair<int,int>;
CSphVector<SortedField_t> m_dFieldSort;
CSphVector<CSphVector<BYTE>> m_dCompressedBuffers;
void WriteInitialHeader();
void WriteTrailingHeader();
void WriteBlock();
void WriteSmallBlockHeader ( SphOffset_t tBlockOffset );
void WriteBigBlockHeader ( SphOffset_t tBlockOffset, SphOffset_t tHeaderSize );
void WriteSmallBlock();
void WriteBigBlock();
};
DocstoreBuilder_c::DocstoreBuilder_c ( CSphString sFilename, const DocstoreSettings_t & tSettings, int iBufferSize )
: m_sFilename ( std::move (sFilename) )
, m_tHeaderWriter ( m_dHeader )
, m_iBufferSize ( iBufferSize )
{
*(DocstoreSettings_t*)this = tSettings;
}
bool DocstoreBuilder_c::Init ( CSphString & sError )
{
m_pCompressor = CreateCompressor ( m_eCompression, m_iCompressionLevel );
if ( !m_pCompressor )
return false;
m_tWriter.SetBufferSize(m_iBufferSize);
return m_tWriter.OpenFile ( m_sFilename, sError );
}
void DocstoreBuilder_c::AddDoc ( RowID_t tRowID, const Doc_t & tDoc )
{
assert ( tDoc.m_dFields.GetLength()==m_tFields.GetNumFields() );
DWORD uLen = 0;
for ( const auto & i : tDoc.m_dFields )
uLen += i.GetLength();
if ( m_uStoredLen+uLen > m_uBlockSize )
WriteBlock();
StoredDoc_t & tStoredDoc = m_dStoredDocs.Add();
tStoredDoc.m_tRowID = tRowID;
tStoredDoc.m_dFields.Resize ( m_tFields.GetNumFields() );
for ( int i = 0; i<m_tFields.GetNumFields(); i++ )
{
int iLen = tDoc.m_dFields[i].GetLength();
// remove trailing zero
if ( m_tFields.GetField(i).m_eType==DOCSTORE_TEXT && iLen>0 && tDoc.m_dFields[i][iLen-1]=='\0' )
iLen--;
tStoredDoc.m_dFields[i].Resize(iLen);
memcpy ( tStoredDoc.m_dFields[i].Begin(), tDoc.m_dFields[i].Begin(), iLen );
}
m_uStoredLen += uLen;
}
void DocstoreBuilder_c::Finalize()
{
WriteBlock();
WriteTrailingHeader();
}
void DocstoreBuilder_c::WriteInitialHeader()
{
m_tWriter.PutDword ( STORAGE_VERSION );
m_tWriter.PutDword ( m_uBlockSize );
m_tWriter.PutByte ( Compression2Byte(m_eCompression) );
m_tFields.Save(m_tWriter);
m_tHeaderOffset = m_tWriter.GetPos();
// reserve space for number of blocks
m_tWriter.PutDword(0);
// reserve space for header offset
m_tWriter.PutOffset(0);
}
void DocstoreBuilder_c::WriteTrailingHeader()
{
SphOffset_t tHeaderPos = m_tWriter.GetPos();
// write header
m_tWriter.PutBytes ( m_dHeader.Begin(), m_dHeader.GetLength() );
// rewind to the beginning, store num_blocks, offset to header
m_tWriter.Flush(); // flush is necessary, see similar code in BlobRowBuilder_File_c::Done
m_tWriter.SeekTo(m_tHeaderOffset);
m_tWriter.PutDword(m_iNumBlocks);
m_tWriter.PutOffset(tHeaderPos);
m_tWriter.CloseFile();
}
void DocstoreBuilder_c::WriteSmallBlockHeader ( SphOffset_t tBlockOffset )
{
m_tHeaderWriter.ZipInt ( m_dStoredDocs[0].m_tRowID-m_tPrevBlockRowID ); // initial block rowid delta
m_tHeaderWriter.PutByte ( BLOCK_TYPE_SMALL ); // block type
m_tHeaderWriter.ZipOffset ( tBlockOffset-m_tPrevBlockOffset ); // block offset delta
m_tPrevBlockOffset = tBlockOffset;
m_tPrevBlockRowID = m_dStoredDocs[0].m_tRowID;
}
void DocstoreBuilder_c::WriteBigBlockHeader ( SphOffset_t tBlockOffset, SphOffset_t tHeaderSize )
{
m_tHeaderWriter.ZipInt ( m_dStoredDocs[0].m_tRowID-m_tPrevBlockRowID ); // initial block rowid delta
m_tHeaderWriter.PutByte ( BLOCK_TYPE_BIG ); // block type
m_tHeaderWriter.ZipOffset ( tBlockOffset-m_tPrevBlockOffset ); // block offset delta
m_tHeaderWriter.ZipInt ( tHeaderSize ); // on-disk header size
m_tPrevBlockOffset = tBlockOffset;
m_tPrevBlockRowID = m_dStoredDocs[0].m_tRowID;
}
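// Small block on-disk format: flags byte, zipped doc count, zipped uncompressed
// length, zipped compressed length (only if BLOCK_FLAG_COMPRESSED), then the
// (optionally compressed) body. Inside the body each doc is a doc flags byte,
// an optional empty-field bitmask, then, per non-empty field, a zipped length
// followed by the raw field data.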
void DocstoreBuilder_c::WriteSmallBlock()
{
m_dCompressedBuffers.Resize(1);
m_dBuffer.Resize(0);
MemoryWriter2_c tMemWriter ( m_dBuffer );
#ifndef NDEBUG
for ( int i=1; i < m_dStoredDocs.GetLength(); i++ )
assert ( m_dStoredDocs[i].m_tRowID-m_dStoredDocs[i-1].m_tRowID==1 );
#endif // !NDEBUG
CSphBitvec tEmptyFields ( m_tFields.GetNumFields() );
for ( const auto & tDoc : m_dStoredDocs )
{
tEmptyFields.Clear();
ARRAY_FOREACH ( iField, tDoc.m_dFields )
if ( !tDoc.m_dFields[iField].GetLength() )
tEmptyFields.BitSet(iField);
int iEmptyFields = tEmptyFields.BitCount();
if ( iEmptyFields==m_tFields.GetNumFields() )
tMemWriter.PutByte ( DOC_FLAG_ALL_EMPTY );
else
{
bool bNeedsBitmask = iEmptyFields && ( tEmptyFields.GetSizeBytes() < iEmptyFields );
tMemWriter.PutByte ( bNeedsBitmask ? DOC_FLAG_EMPTY_BITMASK : 0 );
if ( bNeedsBitmask )
tMemWriter.PutBytes ( tEmptyFields.Begin(), tEmptyFields.GetSizeBytes() );
ARRAY_FOREACH ( iField, tDoc.m_dFields )
if ( !bNeedsBitmask || !tEmptyFields.BitGet(iField) )
{
const CSphVector<BYTE> & tField = tDoc.m_dFields[iField];
tMemWriter.ZipInt ( tField.GetLength() );
tMemWriter.PutBytes ( tField.Begin(), tField.GetLength() );
}
}
}
CSphVector<BYTE> & dCompressedBuffer = m_dCompressedBuffers[0];
BYTE uBlockFlags = 0;
bool bCompressed = m_pCompressor->Compress ( m_dBuffer, dCompressedBuffer );
if ( bCompressed )
uBlockFlags |= BLOCK_FLAG_COMPRESSED;
WriteSmallBlockHeader ( m_tWriter.GetPos() );
m_tWriter.PutByte ( uBlockFlags ); // block flags
m_tWriter.ZipInt ( m_dStoredDocs.GetLength() ); // num docs
m_tWriter.ZipInt ( m_dBuffer.GetLength() ); // uncompressed length
if ( bCompressed )
m_tWriter.ZipInt ( dCompressedBuffer.GetLength() ); // compressed length
// body data
if ( bCompressed )
m_tWriter.PutBytes ( dCompressedBuffer.Begin(), dCompressedBuffer.GetLength() ); // compressed data
else
m_tWriter.PutBytes ( m_dBuffer.Begin(), m_dBuffer.GetLength() ); // uncompressed data
}
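// Big block on-disk format: flags byte, optional field reorder map, then a
// per-field header (flags byte, zipped uncompressed length, zipped compressed
// length if compressed), followed by the field bodies in the same order.
// Fields are sorted by ascending stored size, presumably so that small fields
// can be fetched without reading past the large ones.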
void DocstoreBuilder_c::WriteBigBlock()
{
assert ( m_dStoredDocs.GetLength()==1 );
StoredDoc_t & tDoc = m_dStoredDocs[0];
m_dCompressedBuffers.Resize ( m_tFields.GetNumFields() );
bool bNeedReorder = false;
CSphBitvec tCompressedFields ( m_tFields.GetNumFields() );
int iPrevSize = 0;
ARRAY_FOREACH ( iField, tDoc.m_dFields )
{
const CSphVector<BYTE> & dField = tDoc.m_dFields[iField];
CSphVector<BYTE> & dCompressedBuffer = m_dCompressedBuffers[iField];
bool bCompressed = m_pCompressor->Compress ( dField, dCompressedBuffer );
if ( bCompressed )
tCompressedFields.BitSet(iField);
int iStoredSize = bCompressed ? dCompressedBuffer.GetLength() : dField.GetLength();
bNeedReorder |= iStoredSize < iPrevSize;
iPrevSize = iStoredSize; // compare against the size actually stored; dCompressedBuffer is stale when compression was rejected
}
if ( bNeedReorder )
{
m_dFieldSort.Resize ( m_tFields.GetNumFields() );
ARRAY_FOREACH ( iField, tDoc.m_dFields )
{
m_dFieldSort[iField].first = iField;
m_dFieldSort[iField].second = tCompressedFields.BitGet(iField) ? m_dCompressedBuffers[iField].GetLength() : tDoc.m_dFields[iField].GetLength();
}
m_dFieldSort.Sort ( ::bind(&SortedField_t::second) );
}
SphOffset_t tOnDiskHeaderStart = m_tWriter.GetPos();
BYTE uBlockFlags = bNeedReorder ? BLOCK_FLAG_FIELD_REORDER : 0;
m_tWriter.PutByte(uBlockFlags); // block flags
if ( bNeedReorder )
{
for ( const auto & i : m_dFieldSort )
m_tWriter.ZipInt(i.first); // field reorder map
}
for ( int i = 0; i < m_tFields.GetNumFields(); i++ )
{
int iField = bNeedReorder ? m_dFieldSort[i].first : i;
bool bCompressed = tCompressedFields.BitGet(iField);
bool bEmpty = !tDoc.m_dFields[iField].GetLength();
BYTE uFieldFlags = 0;
uFieldFlags |= bCompressed ? FIELD_FLAG_COMPRESSED : 0;
uFieldFlags |= bEmpty ? FIELD_FLAG_EMPTY : 0;
m_tWriter.PutByte(uFieldFlags); // field flags
if ( bEmpty )
continue;
m_tWriter.ZipInt ( tDoc.m_dFields[iField].GetLength() ); // uncompressed len
if ( bCompressed )
m_tWriter.ZipInt ( m_dCompressedBuffers[iField].GetLength() ); // compressed len (if compressed)
}
SphOffset_t tOnDiskHeaderSize = m_tWriter.GetPos() - tOnDiskHeaderStart;
for ( int i = 0; i < m_tFields.GetNumFields(); i++ )
{
int iField = bNeedReorder ? m_dFieldSort[i].first : i;
bool bCompressed = tCompressedFields.BitGet(iField);
bool bEmpty = !tDoc.m_dFields[iField].GetLength();
if ( bEmpty )
continue;
if ( bCompressed )
m_tWriter.PutBytes ( m_dCompressedBuffers[iField].Begin(), m_dCompressedBuffers[iField].GetLength() ); // compressed data
else
m_tWriter.PutBytes( tDoc.m_dFields[iField].Begin(), tDoc.m_dFields[iField].GetLength() ); // uncompressed data
}
WriteBigBlockHeader ( tOnDiskHeaderStart, tOnDiskHeaderSize );
}
void DocstoreBuilder_c::WriteBlock()
{
if ( !m_tWriter.GetPos() )
WriteInitialHeader();
if ( !m_dStoredDocs.GetLength() )
return;
bool bBigBlock = m_dStoredDocs.GetLength()==1 && m_uStoredLen>=m_uBlockSize;
if ( bBigBlock )
WriteBigBlock();
else
WriteSmallBlock();
m_iNumBlocks++;
m_uStoredLen = 0;
m_dStoredDocs.Resize(0);
}
//////////////////////////////////////////////////////////////////////////
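// In-memory docstore for RT segments. Each document is a single heap
// allocation holding zipped-length-prefixed blobs, one per field, in schema
// order (see AddDoc()/GetDocSize()); rowids map directly to m_dDocs indices.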
class DocstoreRT_c : public DocstoreRT_i
{
public:
~DocstoreRT_c() override;
void AddDoc ( RowID_t tRowID, const DocstoreBuilder_i::Doc_t & tDoc ) final;
int AddField ( const CSphString & sName, DocstoreDataType_e eType ) final;
void RemoveField ( const CSphString & sName, DocstoreDataType_e eType ) final;
void Finalize() final {}
void SwapRows ( RowID_t tDstID, RowID_t tSrcID ) final;
void DropTail ( RowID_t tTailID ) final;
DocstoreDoc_t GetDoc ( RowID_t tRowID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const final;
int GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const final;
DocstoreSettings_t GetDocstoreSettings() const final;
void CreateReader ( int64_t iSessionId ) const final {}
bool Load ( CSphReader & tReader ) final;
void Save ( Writer_i & tWriter ) final;
void Load ( MemoryReader_c & tReader ) final;
void Save ( MemoryWriter_c & tWriter ) final;
void AddPackedDoc ( RowID_t tRowID, const DocstoreRT_i * pSrcDocstore, RowID_t tSrcRowID ) final;
int64_t AllocatedBytes() const final;
static int GetDocSize ( const BYTE * pDoc, int iFieldCount );
bool CheckFieldsLoaded ( CSphString & sError ) const final;
private:
CSphVector<BYTE *> m_dDocs;
int m_iLoadedFieldCount = 0;
DocstoreFields_c m_tFields;
int64_t m_iAllocated = 0;
};
DocstoreRT_c::~DocstoreRT_c()
{
for ( auto & i : m_dDocs )
SafeDeleteArray(i);
}
void DocstoreRT_c::AddDoc ( RowID_t tRowID, const DocstoreBuilder_i::Doc_t & tDoc )
{
assert ( (RowID_t)(m_dDocs.GetLength())==tRowID );
CSphFixedVector<int> tFieldLengths(tDoc.m_dFields.GetLength());
int iPackedLen = 0;
ARRAY_FOREACH ( i, tDoc.m_dFields )
{
int iLen = tDoc.m_dFields[i].GetLength();
// remove trailing zero
if ( m_tFields.GetField(i).m_eType==DOCSTORE_TEXT && iLen>0 && tDoc.m_dFields[i][iLen-1]=='\0' )
iLen--;
iPackedLen += sphCalcZippedLen(iLen)+iLen;
tFieldLengths[i] = iLen;
}
BYTE * & pPacked = m_dDocs.Add();
pPacked = new BYTE[iPackedLen];
BYTE * pPtr = pPacked;
ARRAY_FOREACH ( i, tDoc.m_dFields )
pPtr += sphPackPtrAttr ( pPtr, {tDoc.m_dFields[i].Begin (), tFieldLengths[i]} );
m_iAllocated += iPackedLen;
assert ( pPtr-pPacked==iPackedLen );
}
void DocstoreRT_c::SwapRows ( RowID_t tDstID, RowID_t tSrcID )
{
assert ( tDstID!=INVALID_ROWID );
assert ( tSrcID!=INVALID_ROWID );
::Swap ( m_dDocs[tDstID], m_dDocs[tSrcID]);
}
void DocstoreRT_c::DropTail ( RowID_t tTailID )
{
int iFieldsCount = m_tFields.GetNumFields ();
for ( auto i = tTailID, iLen = (RowID_t) m_dDocs.GetLength (); i<iLen; ++i )
if ( m_dDocs[i])
{
m_iAllocated -= GetDocSize ( m_dDocs[i], iFieldsCount );
SafeDeleteArray( m_dDocs[i] );
}
m_dDocs.Resize ( tTailID );
}
int DocstoreRT_c::AddField ( const CSphString & sName, DocstoreDataType_e eType )
{
return m_tFields.AddField ( sName, eType );
}
void DocstoreRT_c::RemoveField ( const CSphString & sName, DocstoreDataType_e eType )
{
m_tFields.RemoveField ( sName, eType );
}
DocstoreDoc_t DocstoreRT_c::GetDoc ( RowID_t tRowID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const
{
#ifndef NDEBUG
// assume that field ids are sorted
for ( int i = 1; pFieldIds && i < pFieldIds->GetLength(); i++ )
assert ( (*pFieldIds)[i-1] < (*pFieldIds)[i] );
#endif
CSphFixedVector<int> dFieldInRset ( m_tFields.GetNumFields() );
CreateFieldRemap ( dFieldInRset, pFieldIds );
DocstoreDoc_t tResult;
tResult.m_dFields.Resize ( pFieldIds ? pFieldIds->GetLength() : m_tFields.GetNumFields() );
const BYTE * pDoc = m_dDocs[tRowID];
for ( int iField = 0; iField < m_tFields.GetNumFields(); iField++ )
{
DWORD uFieldLength = UnzipIntBE(pDoc);
int iFieldInRset = dFieldInRset[iField];
if ( iFieldInRset!=-1 )
PackData ( tResult.m_dFields[iFieldInRset], pDoc, uFieldLength, m_tFields.GetField(iField).m_eType==DOCSTORE_TEXT, bPack );
pDoc += uFieldLength;
}
return tResult;
}
int DocstoreRT_c::GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const
{
return m_tFields.GetFieldId ( sName, eType );
}
DocstoreSettings_t DocstoreRT_c::GetDocstoreSettings() const
{
assert ( 0 && "No settings for RT docstore" );
return DocstoreSettings_t();
}
int DocstoreRT_c::GetDocSize ( const BYTE * pDoc, int iFieldCount )
{
const BYTE * p = pDoc;
for ( int iField = 0; iField<iFieldCount; iField++ )
p += UnzipIntBE(p);
return p-pDoc;
}
template<typename T>
int64_t DocstoreLoad_T ( CSphVector<BYTE *> & dDocs, T & tReader )
{
int64_t iAllocated = 0;
DWORD uNumDocs = tReader.UnzipInt();
dDocs.Resize (uNumDocs);
for ( auto & i : dDocs )
{
DWORD uDocLen = tReader.UnzipInt();
i = new BYTE[uDocLen];
tReader.GetBytes ( i, uDocLen );
iAllocated += uDocLen;
}
return iAllocated;
}
template<typename T>
void DocstoreSave_T ( const CSphVector<BYTE *> & dDocs, int iFieldCount , T & tWriter )
{
tWriter.ZipInt ( dDocs.GetLength() );
for ( const auto & i : dDocs )
{
int iDocLen = DocstoreRT_c::GetDocSize ( i, iFieldCount );
tWriter.ZipInt ( iDocLen );
tWriter.PutBytes ( i, iDocLen );
}
}
bool DocstoreRT_c::Load ( CSphReader & tReader )
{
assert ( !m_dDocs.GetLength() && !m_iAllocated );
m_iAllocated += DocstoreLoad_T<CSphReader> ( m_dDocs, tReader );
return !tReader.GetErrorFlag();
}
void DocstoreRT_c::Save ( Writer_i & tWriter )
{
DocstoreSave_T<Writer_i> ( m_dDocs, m_tFields.GetNumFields(), tWriter );
}
void DocstoreRT_c::Load ( MemoryReader_c & tReader )
{
assert ( !m_dDocs.GetLength() && !m_iAllocated );
m_iLoadedFieldCount = tReader.GetDword();
m_iAllocated += DocstoreLoad_T<MemoryReader_c> ( m_dDocs, tReader );
}
void DocstoreRT_c::Save ( MemoryWriter_c & tWriter )
{
int iFieldCount = m_tFields.GetNumFields();
tWriter.PutDword ( iFieldCount );
DocstoreSave_T<MemoryWriter_c> ( m_dDocs, iFieldCount, tWriter );
}
bool DocstoreRT_c::CheckFieldsLoaded ( CSphString & sError ) const
{
if ( !m_iLoadedFieldCount )
return true;
int iFieldsCount = m_tFields.GetNumFields();
if ( m_iLoadedFieldCount!=iFieldsCount )
{
sError.SetSprintf ( "wrong fields count, loaded %d, stored %d", m_iLoadedFieldCount, iFieldsCount );
return false;
}
return true;
}
void DocstoreRT_c::AddPackedDoc ( RowID_t tRowID, const DocstoreRT_i * pSrcDocstore, RowID_t tSrcRowID )
{
const DocstoreRT_c * pSrc = (const DocstoreRT_c *)pSrcDocstore;
int iFieldsCount = m_tFields.GetNumFields();
assert ( iFieldsCount==pSrc->m_tFields.GetNumFields() );
// get raw doc and its length
const BYTE * pSrcPacked = pSrc->m_dDocs[tSrcRowID];
const int iSrcPackedLen = pSrc->GetDocSize ( pSrcPacked, iFieldsCount );
// copy doc into new place
BYTE * pDst = new BYTE[iSrcPackedLen];
memcpy ( pDst, pSrcPacked, iSrcPackedLen );
assert ( (RowID_t)(m_dDocs.GetLength())==tRowID );
m_dDocs.Add ( pDst );
m_iAllocated += GetDocSize ( pDst, iFieldsCount );
}
int64_t DocstoreRT_c::AllocatedBytes() const
{
return m_iAllocated + m_dDocs.AllocatedBytes();
}
//////////////////////////////////////////////////////////////////////////
std::atomic<int64_t> DocstoreSession_c::m_tUIDGenerator { 0 };
DocstoreSession_c::DocstoreSession_c()
: m_iUID ( m_tUIDGenerator.fetch_add ( 1, std::memory_order_relaxed ) )
{}
DocstoreSession_c::~DocstoreSession_c()
{
DocstoreReaders_c * pReaders = DocstoreReaders_c::Get();
if ( pReaders )
pReaders->DeleteBySessionId(m_iUID);
}
//////////////////////////////////////////////////////////////////////////
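// Standalone consistency checker for an on-disk docstore: re-parses the
// header, the block directory and every block body, reporting (though not
// always aborting on) malformed flags, out-of-bounds offsets and
// decompression failures.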
class DocstoreChecker_c
{
public:
DocstoreChecker_c ( CSphAutoreader & tReader, DebugCheckError_i & tReporter, int64_t iRowsCount );
bool Check();
private:
CSphAutoreader & m_tReader;
DebugCheckError_i & m_tReporter;
const char * m_szFilename = nullptr;
DocstoreFields_c m_tFields;
std::unique_ptr<Compressor_i> m_pCompressor;
int64_t m_iRowsCount = 0;
void CheckSmallBlockDoc ( MemoryReader2_c & tReader, CSphBitvec & tEmptyFields, SphOffset_t tOffset );
void CheckSmallBlock ( const Docstore_c::Block_t & tBlock );
void CheckBlock ( const Docstore_c::Block_t & tBlock );
void CheckBigBlockField ( const Docstore_c::FieldInfo_t & tInfo, SphOffset_t & tOffset );
void CheckBigBlock ( const Docstore_c::Block_t & tBlock );
};
DocstoreChecker_c::DocstoreChecker_c ( CSphAutoreader & tReader, DebugCheckError_i & tReporter, int64_t iRowsCount )
: m_tReader ( tReader )
, m_tReporter ( tReporter )
, m_szFilename ( tReader.GetFilename().cstr() )
, m_iRowsCount ( iRowsCount )
{}
bool DocstoreChecker_c::Check()
{
DWORD uStorageVersion = m_tReader.GetDword();
if ( uStorageVersion > STORAGE_VERSION )
return m_tReporter.Fail ( "Unable to load docstore: %s is v.%d, binary is v.%d", m_szFilename, uStorageVersion, STORAGE_VERSION );
m_tReader.GetDword(); // block size
BYTE uCompression = m_tReader.GetByte();
if ( uCompression > 2 )
return m_tReporter.Fail ( "Unknown docstore compression %u in %s", uCompression, m_szFilename );
Compression_e eCompression = Byte2Compression(uCompression);
m_pCompressor = CreateCompressor ( eCompression, DEFAULT_COMPRESSION_LEVEL );
if ( !m_pCompressor )
return m_tReporter.Fail ( "Unable to create compressor in %s", m_szFilename );
DWORD uNumFields = m_tReader.GetDword();
const DWORD MAX_SANE_FIELDS = 32768;
if ( uNumFields > MAX_SANE_FIELDS )
return m_tReporter.Fail ( "Too many docstore fields (%u) in %s", uNumFields, m_szFilename );
for ( int i = 0; i < (int)uNumFields; i++ )
{
BYTE uDataType = m_tReader.GetByte();
if ( uDataType >= DOCSTORE_TOTAL )
return m_tReporter.Fail ( "Unknown docstore data type (%u) in %s", uDataType, m_szFilename );
DocstoreDataType_e eType = (DocstoreDataType_e)uDataType;
CSphString sName = m_tReader.GetString();
const int MAX_SANE_FIELD_NAME_LEN = 32768;
if ( sName.Length() > MAX_SANE_FIELD_NAME_LEN )
return m_tReporter.Fail ( "Docstore field name too long (%d) in %s", sName.Length(), m_szFilename );
m_tFields.AddField ( sName, eType );
}
DWORD uNumBlocks = m_tReader.GetDword();
// docstore from empty index
if ( !uNumBlocks )
{
if ( !m_iRowsCount )
return true;
return m_tReporter.Fail ( "Docstore has 0 blocks but " INT64_FMT " documents in %s", m_iRowsCount, m_szFilename );
}
SphOffset_t tHeaderOffset = m_tReader.GetOffset();
if ( tHeaderOffset <= 0 || tHeaderOffset >= m_tReader.GetFilesize() )
return m_tReporter.Fail ( "Wrong docstore header offset (" INT64_FMT ") in %s", tHeaderOffset, m_szFilename );
m_tReader.SeekTo ( tHeaderOffset, 0 );
CSphFixedVector<Docstore_c::Block_t> dBlocks(uNumBlocks);
DWORD tPrevBlockRowID = 0;
SphOffset_t tPrevBlockOffset = 0;
for ( auto & i : dBlocks )
{
RowID_t uUnzipped = m_tReader.UnzipRowid();
if ( (int64_t)uUnzipped + tPrevBlockRowID >= (int64_t)0xFFFFFFFF )
m_tReporter.Fail ( "Docstore rowid overflow in %s", m_szFilename );
i.m_tRowID = uUnzipped + tPrevBlockRowID;
BYTE uBlockType = m_tReader.GetByte();
if ( uBlockType>BLOCK_TYPE_TOTAL )
return m_tReporter.Fail ( "Unknown docstore block type (%u) in %s", uBlockType, m_szFilename );
i.m_eType = (BlockType_e)uBlockType;
i.m_tOffset = m_tReader.UnzipOffset() + tPrevBlockOffset;
if ( i.m_tOffset <= 0 || i.m_tOffset >= m_tReader.GetFilesize() )
return m_tReporter.Fail ( "Wrong docstore block offset (" INT64_FMT ") in %s", i.m_tOffset, m_szFilename );
if ( i.m_eType==BLOCK_TYPE_BIG )
i.m_uHeaderSize = m_tReader.UnzipInt();
tPrevBlockRowID = i.m_tRowID;
tPrevBlockOffset = i.m_tOffset;
}
for ( int i = 1; i<dBlocks.GetLength(); i++ )
{
if ( dBlocks[i-1].m_tOffset>=dBlocks[i].m_tOffset )
return m_tReporter.Fail ( "Descending docstore block offset in %s", m_szFilename );
dBlocks[i-1].m_uSize = dBlocks[i].m_tOffset-dBlocks[i-1].m_tOffset;
}
if ( dBlocks.GetLength() )
dBlocks.Last().m_uSize = tHeaderOffset-dBlocks.Last().m_tOffset;
for ( auto & i : dBlocks )
{
if ( i.m_tOffset+i.m_uSize > m_tReader.GetFilesize() )
return m_tReporter.Fail ( "Docstore block size+offset out of bounds in %s", m_szFilename );
CheckBlock(i);
}
if ( m_tReader.GetErrorFlag() )
return m_tReporter.Fail ( "%s", m_tReader.GetErrorMessage().cstr() );
return true;
}
void DocstoreChecker_c::CheckSmallBlockDoc ( MemoryReader2_c & tReader, CSphBitvec & tEmptyFields, SphOffset_t tOffset )
{
BYTE uDocFlags = tReader.GetVal<BYTE>();
if ( uDocFlags & ( ~(DOC_FLAG_ALL_EMPTY | DOC_FLAG_EMPTY_BITMASK) ) )
m_tReporter.Fail ( "Unknown docstore doc flag (%u) in %s (offset " INT64_FMT ")", uDocFlags, m_szFilename, tOffset );
if ( uDocFlags & DOC_FLAG_ALL_EMPTY )
return;
DWORD uBitMaskSize = tEmptyFields.GetSizeBytes();
bool bHasBitmask = !!(uDocFlags & DOC_FLAG_EMPTY_BITMASK);
if ( bHasBitmask )
{
memcpy ( tEmptyFields.Begin(), tReader.Begin()+tReader.GetPos(), uBitMaskSize );
tReader.SetPos ( tReader.GetPos()+uBitMaskSize );
}
for ( int iField = 0; iField < m_tFields.GetNumFields(); iField++ )
if ( !bHasBitmask || !tEmptyFields.BitGet(iField) )
{
DWORD uFieldLength = tReader.UnzipInt();
tReader.SetPos ( tReader.GetPos()+uFieldLength );
if ( tReader.GetPos() > tReader.GetLength() )
m_tReporter.Fail ( "Out of bounds in docstore field data in %s (offset " INT64_FMT ")", m_szFilename, tOffset );
}
}
void DocstoreChecker_c::CheckSmallBlock ( const Docstore_c::Block_t & tBlock )
{
CSphFixedVector<BYTE> dBlock ( tBlock.m_uSize );
m_tReader.SeekTo ( tBlock.m_tOffset, 0 );
m_tReader.GetBytes ( dBlock.Begin(), dBlock.GetLength() );
MemoryReader2_c tBlockReader ( dBlock.Begin(), dBlock.GetLength() );
BlockData_t tResult;
tResult.m_uFlags = tBlockReader.GetVal<BYTE>();
tResult.m_uNumDocs = tBlockReader.UnzipInt();
tResult.m_uSize = tBlockReader.UnzipInt();
DWORD uCompressedLength = tResult.m_uSize;
bool bCompressed = tResult.m_uFlags & BLOCK_FLAG_COMPRESSED;
if ( bCompressed )
uCompressedLength = tBlockReader.UnzipInt();
if ( tResult.m_uFlags!=0 && tResult.m_uFlags!=BLOCK_FLAG_COMPRESSED )
m_tReporter.Fail ( "Unknown docstore small block flag (%u) in %s (offset " INT64_FMT ")", tResult.m_uFlags, m_szFilename, tBlock.m_tOffset );
if ( uCompressedLength>tResult.m_uSize )
m_tReporter.Fail ( "Docstore block size mismatch: compressed=%u, uncompressed=%u in %s (offset " INT64_FMT ")", uCompressedLength, tResult.m_uSize, m_szFilename, tBlock.m_tOffset );
if ( !tResult.m_uNumDocs )
m_tReporter.Fail ( "Docstore block invalid document count: %u in %s (offset " INT64_FMT ")", tResult.m_uNumDocs, m_szFilename, tBlock.m_tOffset );
const BYTE * pBody = dBlock.Begin() + tBlockReader.GetPos();
CSphFixedVector<BYTE> dDecompressed(0);
if ( bCompressed )
{
dDecompressed.Reset ( tResult.m_uSize );
if ( !m_pCompressor->Decompress ( VecTraits_T<const BYTE> (pBody, uCompressedLength), dDecompressed) )
m_tReporter.Fail ( "Error decompressing small block in %s (offset " INT64_FMT ")", m_szFilename, tBlock.m_tOffset );
tResult.m_pData = dDecompressed.LeakData();
}
else
{
// pBody points into the middle of dBlock, so copy it into a standalone allocation that m_pData can own and later free
tResult.m_pData = new BYTE[tResult.m_uSize];
memcpy ( tResult.m_pData, pBody, tResult.m_uSize );
}
MemoryReader2_c tReader ( tResult.m_pData, tResult.m_uSize );
CSphBitvec tEmptyFields ( m_tFields.GetNumFields() );
for ( int i = 0; i < (int)tResult.m_uNumDocs; i++ )
CheckSmallBlockDoc ( tReader, tEmptyFields, tBlock.m_tOffset );
SafeDeleteArray ( tResult.m_pData ); // allocated with new[] above (or LeakData), so delete[] it
}
void DocstoreChecker_c::CheckBigBlockField ( const Docstore_c::FieldInfo_t & tInfo, SphOffset_t & tOffset )
{
if ( tInfo.m_uFlags & FIELD_FLAG_EMPTY )
return;
bool bCompressed = !!( tInfo.m_uFlags & FIELD_FLAG_COMPRESSED );
SphOffset_t tOffsetDelta = bCompressed ? tInfo.m_uCompressedLen : tInfo.m_uUncompressedLen;
BlockData_t tBlockData;
CSphFixedVector<BYTE> dField ( tOffsetDelta );
m_tReader.SeekTo ( tOffset, 0 );
m_tReader.GetBytes ( dField.Begin(), dField.GetLength() );
tBlockData.m_uSize = tInfo.m_uUncompressedLen;
if ( bCompressed )
{
CSphFixedVector<BYTE> dDecompressed(0);
dDecompressed.Reset ( tBlockData.m_uSize );
if ( !m_pCompressor->Decompress ( dField, dDecompressed ) )
m_tReporter.Fail ( "Error decompressing big block in %s (offset " INT64_FMT ")", m_szFilename, tOffset );
}
tOffset += tOffsetDelta;
if ( tOffset > m_tReader.GetFilesize() )
m_tReporter.Fail ( "Docstore block size+offset out of bounds in %s (offset " INT64_FMT ")", m_szFilename, tOffset );
}
void DocstoreChecker_c::CheckBigBlock ( const Docstore_c::Block_t & tBlock )
{
CSphFixedVector<Docstore_c::FieldInfo_t> dFieldInfo ( m_tFields.GetNumFields() );
CSphFixedVector<BYTE> dBlockHeader(tBlock.m_uHeaderSize);
CSphFixedVector<BYTE> dBlock ( tBlock.m_uSize );
m_tReader.SeekTo ( tBlock.m_tOffset, 0 );
m_tReader.GetBytes ( dBlockHeader.Begin(), dBlockHeader.GetLength() );
MemoryReader2_c tReader ( dBlockHeader.Begin(), dBlockHeader.GetLength() );
CSphVector<int> dFieldSort;
BYTE uBlockFlags = tReader.GetVal<BYTE>();
if ( uBlockFlags & ~BLOCK_FLAG_FIELD_REORDER )
m_tReporter.Fail ( "Unknown docstore big block flag (%u) in %s (offset " INT64_FMT ")", uBlockFlags, m_szFilename, tBlock.m_tOffset );
bool bNeedReorder = !!( uBlockFlags & BLOCK_FLAG_FIELD_REORDER );
if ( bNeedReorder )
{
dFieldSort.Resize ( m_tFields.GetNumFields() );
for ( auto & i : dFieldSort )
{
i = tReader.UnzipInt();
if ( i<0 || i>=m_tFields.GetNumFields() )
m_tReporter.Fail ( "Error in docstore field remap (%d) in %s (offset " INT64_FMT ")", i, m_szFilename, tBlock.m_tOffset );
}
}
for ( int i = 0; i < m_tFields.GetNumFields(); i++ )
{
int iField = bNeedReorder ? dFieldSort[i] : i;
Docstore_c::FieldInfo_t & tInfo = dFieldInfo[iField];
tInfo.m_uFlags = tReader.GetVal<BYTE>();
if ( tInfo.m_uFlags & (~(FIELD_FLAG_EMPTY | FIELD_FLAG_COMPRESSED) ) )
m_tReporter.Fail ( "Unknown docstore big block field flag (%u) in %s (offset " INT64_FMT ")", tInfo.m_uFlags, m_szFilename, tBlock.m_tOffset );
if ( tInfo.m_uFlags & FIELD_FLAG_EMPTY )
continue;
tInfo.m_uUncompressedLen = tReader.UnzipInt();
if ( tInfo.m_uFlags & FIELD_FLAG_COMPRESSED )
tInfo.m_uCompressedLen = tReader.UnzipInt();
if ( tInfo.m_uCompressedLen>tInfo.m_uUncompressedLen )
m_tReporter.Fail ( "Docstore block size mismatch: compressed=%u, uncompressed=%u in %s (offset " INT64_FMT ")", tInfo.m_uCompressedLen, tInfo.m_uUncompressedLen, m_szFilename, tBlock.m_tOffset );
if ( tReader.GetPos() > tReader.GetLength() )
m_tReporter.Fail ( "Out of bounds in docstore field data in %s (offset " INT64_FMT ")", m_szFilename, tBlock.m_tOffset );
}
SphOffset_t tOffset = tBlock.m_tOffset+tBlock.m_uHeaderSize;
for ( int i = 0; i < m_tFields.GetNumFields(); i++ )
CheckBigBlockField ( dFieldInfo[bNeedReorder ? dFieldSort[i] : i], tOffset );
}
void DocstoreChecker_c::CheckBlock ( const Docstore_c::Block_t & tBlock )
{
if ( tBlock.m_eType==BLOCK_TYPE_SMALL )
CheckSmallBlock(tBlock);
else
CheckBigBlock(tBlock);
}
//////////////////////////////////////////////////////////////////////////
std::unique_ptr<Docstore_i> CreateDocstore ( int64_t iIndexId, const CSphString & sFilename, CSphString & sError )
{
auto pDocstore = std::make_unique<Docstore_c>( iIndexId, sFilename );
if ( !pDocstore->Init(sError) )
return nullptr;
return pDocstore;
}
std::unique_ptr<DocstoreBuilder_i> CreateDocstoreBuilder ( const CSphString & sFilename, const DocstoreSettings_t & tSettings, int iBufferSize, CSphString & sError )
{
auto pBuilder = std::make_unique<DocstoreBuilder_c>( sFilename, tSettings, iBufferSize );
if ( !pBuilder->Init(sError) )
return nullptr;
return pBuilder;
}
std::unique_ptr<DocstoreRT_i> CreateDocstoreRT()
{
return std::make_unique<DocstoreRT_c>();
}
std::unique_ptr<DocstoreFields_i> CreateDocstoreFields()
{
return std::make_unique<DocstoreFields_c>();
}
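// A minimal usage sketch of the factory API above (error handling elided;
// the filename, buffer size and rowids are hypothetical, and Doc_t population
// is shown only schematically):
//
//   CSphString sError;
//   DocstoreSettings_t tSettings; // default block size / compression
//   auto pBuilder = CreateDocstoreBuilder ( "test.spds", tSettings, 1<<20, sError );
//   int iField = pBuilder->AddField ( "title", DOCSTORE_TEXT );
//   DocstoreBuilder_i::Doc_t tDoc;
//   // ...point tDoc.m_dFields[iField] at the raw field bytes...
//   pBuilder->AddDoc ( 0, tDoc );
//   pBuilder->Finalize();
//
//   auto pDocstore = CreateDocstore ( 1 /*index id*/, "test.spds", sError );
//   DocstoreDoc_t tFetched = pDocstore->GetDoc ( 0, nullptr /*all fields*/, -1 /*session*/, false );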
void InitDocstore ( int64_t iCacheSize )
{
BlockCache_c::Init(iCacheSize);
DocstoreReaders_c::Init();
}
void ShutdownDocstore()
{
BlockCache_c::Done();
DocstoreReaders_c::Done();
}
bool CheckDocstore ( CSphAutoreader & tReader, DebugCheckError_i & tReporter, int64_t iRowsCount )
{
DocstoreChecker_c tChecker ( tReader, tReporter, iRowsCount );
return tChecker.Check();
}
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/icu.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "icu.h"
#include "sphinxint.h"
#include "cjkpreprocessor.h"
#if WITH_ICU
#ifndef U_STATIC_IMPLEMENTATION
#define U_STATIC_IMPLEMENTATION
#endif
#ifndef U_CHARSET_IS_UTF8
#define U_CHARSET_IS_UTF8 1
#endif
#ifndef U_NO_DEFAULT_INCLUDE_UTF_HEADERS
#define U_NO_DEFAULT_INCLUDE_UTF_HEADERS 1
#endif
#include <unicode/brkiter.h>
#include <unicode/udata.h>
#include <unicode/ustring.h>
static CSphString g_sICUDir;
//////////////////////////////////////////////////////////////////////////
static bool g_bICUInitialized = false;
static void ConfigureICU()
{
if ( g_bICUInitialized )
return;
g_sICUDir = GetICUDataDir();
u_setDataDirectory ( g_sICUDir.cstr() );
g_bICUInitialized = true;
}
//////////////////////////////////////////////////////////////////////////
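// CJK preprocessor backed by an ICU word-break iterator: ProcessBuffer() wraps
// the UTF-8 input in a UText and positions the iterator, then GetNextToken()
// walks consecutive word boundaries, trimming surrounding whitespace and
// skipping all-space segments.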
class ICUPreprocessor_c : public CJKPreprocessor_c
{
public:
bool Init ( CSphString & sError ) override;
CJKPreprocessor_c * Clone ( const FieldFilterOptions_t * pOptions ) override { return new ICUPreprocessor_c; }
protected:
void ProcessBuffer ( const BYTE * pBuffer, int iLength ) override;
const BYTE * GetNextToken ( int & iTokenLen ) override;
private:
std::unique_ptr<icu::BreakIterator> m_pBreakIterator;
const BYTE * m_pBuffer {nullptr};
int m_iBoundaryIndex {0};
int m_iPrevBoundary {0};
};
bool ICUPreprocessor_c::Init ( CSphString & sError )
{
ConfigureICU();
assert ( !m_pBreakIterator );
UErrorCode tStatus = U_ZERO_ERROR;
m_pBreakIterator = std::unique_ptr<icu::BreakIterator> { icu::BreakIterator::createWordInstance ( icu::Locale::getChinese(), tStatus ) };
if ( U_FAILURE(tStatus) )
{
sError.SetSprintf( "Unable to initialize ICU break iterator: %s", u_errorName(tStatus) );
if ( tStatus==U_MISSING_RESOURCE_ERROR )
sError.SetSprintf ( "%s. Make sure ICU data file is accessible (using '%s' folder)", sError.cstr(), g_sICUDir.cstr() );
return false;
}
if ( !m_pBreakIterator )
{
sError = "Unable to initialize ICU break iterator";
return false;
}
return true;
}
void ICUPreprocessor_c::ProcessBuffer ( const BYTE * pBuffer, int iLength )
{
assert ( m_pBreakIterator );
UErrorCode tStatus = U_ZERO_ERROR;
UText * pUText = utext_openUTF8 ( nullptr, (const char*)pBuffer, iLength, &tStatus );
if ( U_FAILURE(tStatus) )
sphWarning ( "Error processing buffer (ICU): %s", u_errorName(tStatus) );
assert ( pUText );
m_pBreakIterator->setText ( pUText, tStatus );
if ( U_FAILURE(tStatus) )
sphWarning ( "Error processing buffer (ICU): %s", u_errorName(tStatus) );
utext_close ( pUText );
m_pBuffer = pBuffer;
m_iPrevBoundary = m_iBoundaryIndex = m_pBreakIterator->first();
}
const BYTE * ICUPreprocessor_c::GetNextToken ( int & iTokenLen )
{
if ( !m_pBreakIterator || m_iBoundaryIndex==icu::BreakIterator::DONE )
return nullptr;
while ( ( m_iBoundaryIndex = m_pBreakIterator->next() )!=icu::BreakIterator::DONE )
{
int iLength = m_iBoundaryIndex-m_iPrevBoundary;
// ltrim
const BYTE * pStart = m_pBuffer+m_iPrevBoundary;
const BYTE * pMax = pStart + iLength;
while ( pStart<pMax && sphIsSpace(*pStart) )
pStart++;
// rtrim
while ( pStart<pMax && sphIsSpace(*(pMax-1)) )
pMax--;
m_iPrevBoundary = m_iBoundaryIndex;
if ( pStart!=pMax )
{
iTokenLen = int ( pMax-pStart );
return pStart;
}
}
return nullptr;
}
//////////////////////////////////////////////////////////////////////////
bool sphCheckConfigICU ( CSphIndexSettings &, CSphString & )
{
return true;
}
bool sphSpawnFilterICU ( std::unique_ptr<ISphFieldFilter> & pFieldFilter, const CSphIndexSettings & tSettings, const CSphTokenizerSettings & tTokSettings, const char * szIndex, CSphString & sError )
{
if ( tSettings.m_ePreprocessor!=Preprocessor_e::ICU )
return true;
auto pFilterICU = CreateFilterCJK ( std::move ( pFieldFilter ), std::make_unique<ICUPreprocessor_c>(), tTokSettings.m_sBlendChars.cstr(), sError );
if ( !sError.IsEmpty() )
{
sError.SetSprintf ( "table '%s': Error initializing ICU: %s", szIndex, sError.cstr() );
return false;
}
pFieldFilter = std::move ( pFilterICU );
return true;
}
#else
bool sphCheckConfigICU ( CSphIndexSettings & tSettings, CSphString & sError )
{
if ( tSettings.m_ePreprocessor==Preprocessor_e::ICU )
{
tSettings.m_ePreprocessor = Preprocessor_e::NONE;
sError.SetSprintf ( "ICU options specified, but no ICU support compiled; ignoring\n" );
return false;
}
return true;
}
bool sphSpawnFilterICU ( std::unique_ptr<ISphFieldFilter> &, const CSphIndexSettings &, const CSphTokenizerSettings &, const char *, CSphString & )
{
return true;
}
#endif
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/taskglobalidf.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "taskglobalidf.h"
#include "searchdtask.h"
#include "searchdaemon.h"
#include "global_idf.h"
// logf() is not there sometimes (e.g. Solaris 9)
#if !_WIN32 && !HAVE_LOGF
static inline float logf ( float v )
{
return (float) log ( v );
}
#endif
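// Collects the global IDF paths of all local indexes and reloads them in a
// background job; the static coroutine mutex serializes concurrent rotations.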
void RotateGlobalIdf ()
{
Threads::StartJob ( []
{
static Threads::Coro::Mutex_c tSerializer;
Threads::Coro::ScopedMutex_t tLock { tSerializer };
CSphVector<CSphString> dFiles;
ServedSnap_t hLocals = g_pLocalIndexes->GetHash();
for ( auto& tIt : *hLocals )
{
auto pIndex = tIt.second;
if ( pIndex && !pIndex->m_sGlobalIDFPath.IsEmpty() )
dFiles.Add ( pIndex->m_sGlobalIDFPath );
}
auto pDesc = PublishSystemInfo ( "ROTATE global IDF" );
sph::UpdateGlobalIDFs ( dFiles );
});
}
//////////////////////////////////////////////////////////////////////////
// manticoresoftware_manticoresearch/src/sortsetup.cpp
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sortsetup.h"
#include "sphinxjson.h"
#include "sphinxsort.h"
CSphMatchComparatorState::CSphMatchComparatorState()
{
for ( int i=0; i<MAX_ATTRS; ++i )
{
m_eKeypart[i] = SPH_KEYPART_ROWID;
m_dAttrs[i] = -1;
}
}
bool CSphMatchComparatorState::UsesBitfields() const
{
for ( int i=0; i<MAX_ATTRS; ++i )
if ( m_eKeypart[i]==SPH_KEYPART_INT && m_tLocator[i].IsBitfield() )
return true;
return false;
}
//////////////////////////////////////////////////////////////////////////
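// In-place tokenizer for "attr1 asc, attr2 desc" style clauses: separator
// characters are rewritten to NUL by ToLower(), so GetToken() simply skips
// NUL runs and returns pointers into the lowercased copy. Case is preserved
// inside JSON paths (after '.' or '[').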
class SortClauseTokenizer_c
{
public:
explicit SortClauseTokenizer_c ( const char * sBuffer )
{
auto iLen = (int) strlen(sBuffer);
m_pBuf = new char [ iLen+1 ];
m_pMax = m_pBuf+iLen;
m_pCur = m_pBuf;
// make string lowercase but keep case of JSON.field
bool bJson = false;
for ( int i=0; i<=iLen; i++ )
{
char cSrc = sBuffer[i];
char cDst = ToLower ( cSrc );
bJson = ( cSrc=='.' || cSrc=='[' || ( bJson && cDst>0 ) ); // keep case of valid char sequence after '.' and '[' symbols
m_pBuf[i] = bJson ? cSrc : cDst;
}
}
~SortClauseTokenizer_c()
{
SafeDeleteArray ( m_pBuf );
}
const char * GetToken ()
{
// skip spaces
while ( m_pCur<m_pMax && !*m_pCur )
m_pCur++;
if ( m_pCur>=m_pMax )
return nullptr;
// memorize token start, and move pointer forward
const char * sRes = m_pCur;
while ( *m_pCur )
m_pCur++;
return sRes;
}
bool IsSparseCount ( const char * sTok )
{
const char * sSeq = "(*)";
for ( ; sTok<m_pMax && *sSeq; sTok++ )
{
bool bGotSeq = ( *sSeq==*sTok );
if ( bGotSeq )
sSeq++;
// stop checking on any non-space char outside the sequence, or once the sequence ends
if ( ( !bGotSeq && !sphIsSpace ( *sTok ) && *sTok!='\0' ) || !*sSeq )
break;
}
if ( !*sSeq && sTok+1<m_pMax && !sTok[1] )
{
// advance token iterator after composite count(*) token
m_pCur = sTok+1;
return true;
}
return false;
}
protected:
const char * m_pCur = nullptr;
const char * m_pMax = nullptr;
char * m_pBuf = nullptr;
char ToLower ( char c )
{
// 0..9, A..Z->a..z, _, a..z, @, .
if ( ( c>='0' && c<='9' ) || ( c>='a' && c<='z' ) || c=='_' || c=='@' || c=='.' || c=='[' || c==']' || c=='\'' || c=='\"' || c=='(' || c==')' || c=='*' )
return c;
if ( c>='A' && c<='Z' )
return c-'A'+'a';
return 0;
}
};
inline ESphSortKeyPart Attr2Keypart_ ( ESphAttr eType )
{
switch ( eType )
{
case SPH_ATTR_FLOAT:
return SPH_KEYPART_FLOAT;
case SPH_ATTR_DOUBLE:
return SPH_KEYPART_DOUBLE;
case SPH_ATTR_STRING:
return SPH_KEYPART_STRING;
case SPH_ATTR_JSON:
case SPH_ATTR_JSON_PTR:
case SPH_ATTR_JSON_FIELD:
case SPH_ATTR_JSON_FIELD_PTR:
case SPH_ATTR_STRINGPTR:
return SPH_KEYPART_STRINGPTR;
default:
return SPH_KEYPART_INT;
}
}
//////////////////////////////////////////////////////////////////////////
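// Resolves one sort-by token into a comparator slot. Setup() tries, in order:
// relevance aliases, internal-name unification, MVA rejection, aliased
// group-by/sort-by lookups, columnar expressions, JSON attributes/fields,
// JSON conversion functions, and finally agent-precalculated @int_* attrs.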
class SortStateSetup_c
{
public:
SortStateSetup_c ( const char * szTok, SortClauseTokenizer_c & tTok, CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs, int iField, const ISphSchema & tSchema, const CSphQuery & tQuery, const JoinArgs_t * pJoinArgs );
bool Setup ( CSphString & sError );
private:
const char * m_szTok = nullptr;
SortClauseTokenizer_c & m_tTok;
CSphMatchComparatorState & m_tState;
ExtraSortExpr_t & m_tExtraExpr;
const int m_iField;
const ISphSchema & m_tSchema;
const CSphQuery & m_tQuery;
const JoinArgs_t * m_pJoinArgs = nullptr;
int m_iAttr = -1;
ESphAttr m_eAttrType = SPH_ATTR_NONE;
bool SetupSortByRelevance();
void UnifyInternalAttrNames();
bool CheckOrderByMva ( CSphString & sError ) const;
void FindAliasedGroupby();
void FindAliasedSortby();
bool IsJsonAttr() const;
void SetupJsonAttr();
bool SetupJsonField ( CSphString & sError );
bool SetupColumnar ( CSphString & sError );
bool SetupJson ( CSphString & sError );
void SetupJsonConversions();
void SetupPrecalculatedJson();
};
SortStateSetup_c::SortStateSetup_c ( const char * szTok, SortClauseTokenizer_c & tTok, CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dJsonExprs, int iField, const ISphSchema & tSchema, const CSphQuery & tQuery, const JoinArgs_t * pJoinArgs )
: m_szTok ( szTok )
, m_tTok ( tTok )
, m_tState ( tState )
, m_tExtraExpr ( dJsonExprs[iField] )
, m_iField ( iField )
, m_tSchema ( tSchema )
, m_tQuery ( tQuery )
, m_pJoinArgs ( pJoinArgs )
{}
bool SortStateSetup_c::SetupSortByRelevance()
{
if ( !strcasecmp ( m_szTok, "@relevance" )
|| !strcasecmp ( m_szTok, "@rank" )
|| !strcasecmp ( m_szTok, "@weight" )
|| !strcasecmp ( m_szTok, "weight()" ) )
{
m_tState.m_eKeypart[m_iField] = SPH_KEYPART_WEIGHT;
return true;
}
return false;
}
void SortStateSetup_c::UnifyInternalAttrNames()
{
if ( !strcasecmp ( m_szTok, "@group" ) )
m_szTok = "@groupby";
else if ( !strcasecmp ( m_szTok, "count(*)" ) )
m_szTok = "@count";
else if ( !strcasecmp ( m_szTok, "facet()" ) )
m_szTok = "@groupby"; // facet() is essentially a @groupby alias
else if ( strcasecmp ( m_szTok, "count" )>=0 && m_tTok.IsSparseCount ( m_szTok + sizeof ( "count" ) - 1 ) ) // epression count(*) with various spaces
m_szTok = "@count";
else if ( !strcasecmp ( m_szTok, "knn_dist()" ) )
m_szTok = "@knn_dist";
}
bool SortStateSetup_c::CheckOrderByMva ( CSphString & sError ) const
{
int iAttr = m_tSchema.GetAttrIndex(m_szTok);
if ( iAttr<0 )
return true;
ESphAttr eAttrType = m_tSchema.GetAttr(iAttr).m_eAttrType;
if ( eAttrType==SPH_ATTR_UINT32SET || eAttrType==SPH_ATTR_INT64SET || eAttrType==SPH_ATTR_UINT32SET_PTR || eAttrType==SPH_ATTR_INT64SET_PTR )
{
sError.SetSprintf ( "order by MVA is undefined" );
return false;
}
return true;
}
void SortStateSetup_c::FindAliasedGroupby()
{
if ( m_iAttr>=0 )
return;
int iAttr = m_tSchema.GetAttrIndex(m_szTok);
if ( iAttr>=0 )
{
m_iAttr = iAttr;
return;
}
// try to look up aliased count(*) and aliased groupby() among the select items
for ( auto & i : m_tQuery.m_dItems )
{
if ( !i.m_sAlias.cstr() || strcasecmp ( i.m_sAlias.cstr(), m_szTok ) )
continue;
if ( i.m_sExpr.Begins("@") )
{
m_iAttr = m_tSchema.GetAttrIndex ( i.m_sExpr.cstr() );
return;
}
if ( i.m_sExpr=="count(*)" )
{
m_iAttr = m_tSchema.GetAttrIndex ( "@count" );
return;
}
if ( i.m_sExpr=="groupby()" )
{
CSphString sGroupJson = SortJsonInternalSet ( m_tQuery.m_sGroupBy );
m_iAttr = m_tSchema.GetAttrIndex ( sGroupJson.cstr() );
// try numeric group by
if ( m_iAttr<0 )
m_iAttr = m_tSchema.GetAttrIndex ( "@groupby" );
return;
}
}
}
void SortStateSetup_c::FindAliasedSortby()
{
if ( m_iAttr>=0 )
return;
int iAttr = m_tSchema.GetAttrIndex(m_szTok);
if ( iAttr>=0 )
{
m_iAttr = iAttr;
return;
}
for ( auto & i : m_tQuery.m_dItems )
{
if ( !i.m_sAlias.cstr() || strcasecmp ( i.m_sAlias.cstr(), m_szTok ) )
continue;
m_iAttr = m_tSchema.GetAttrIndex ( i.m_sExpr.cstr() );
return;
}
}
bool SortStateSetup_c::IsJsonAttr() const
{
if ( m_iAttr<0 )
return false;
ESphAttr eAttrType = m_tSchema.GetAttr(m_iAttr).m_eAttrType;
if ( eAttrType==SPH_ATTR_JSON_FIELD || eAttrType==SPH_ATTR_JSON_FIELD_PTR || eAttrType==SPH_ATTR_JSON || eAttrType==SPH_ATTR_JSON_PTR )
return true;
return false;
}
void SortStateSetup_c::SetupJsonAttr()
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(m_iAttr);
m_tExtraExpr.m_pExpr = tAttr.m_pExpr;
m_tExtraExpr.m_tKey = JsonKey_t ( m_szTok, (int)strlen(m_szTok) );
}
bool SortStateSetup_c::SetupJsonField ( CSphString & sError )
{
CSphString sJsonCol;
if ( !sphJsonNameSplit ( m_szTok, m_tQuery.m_sJoinIdx.cstr(), &sJsonCol ) )
return true;
m_iAttr = m_tSchema.GetAttrIndex ( sJsonCol.cstr() );
if ( m_iAttr>=0 )
{
ExprParseArgs_t tExprArgs;
ISphExpr * pExpr = sphExprParse ( m_szTok, m_tSchema, m_pJoinArgs ? &(m_pJoinArgs->m_sIndex2) : nullptr, sError, tExprArgs );
if ( !pExpr )
return false;
m_tExtraExpr.m_pExpr = pExpr;
m_tExtraExpr.m_tKey = JsonKey_t ( m_szTok, (int) strlen ( m_szTok ) );
}
return true;
}
bool SortStateSetup_c::SetupColumnar ( CSphString & sError )
{
if ( m_iAttr<0 )
return true;
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(m_iAttr);
if ( !tAttr.IsColumnar() || tAttr.IsJoined() )
return true;
ExprParseArgs_t tExprArgs;
tExprArgs.m_pAttrType = &m_eAttrType;
ISphExpr * pExpr = sphExprParse ( m_szTok, m_tSchema, m_pJoinArgs ? &(m_pJoinArgs->m_sIndex2) : nullptr, sError, tExprArgs );
if ( !pExpr )
return false;
m_tExtraExpr.m_pExpr = pExpr;
m_tExtraExpr.m_eType = m_eAttrType;
return true;
}
bool SortStateSetup_c::SetupJson ( CSphString & sError )
{
if ( IsJsonAttr() )
{
SetupJsonAttr();
return true;
}
// no attribute matched yet; try to parse the token as a JSON field expression (attr.key) and use that instead
if ( m_iAttr<0 )
return SetupJsonField(sError);
return true;
}
void SortStateSetup_c::SetupJsonConversions()
{
if ( m_iAttr>=0 )
return;
// try json conversion functions (integer()/double()/bigint() in the order by clause)
ESphAttr eAttrType = SPH_ATTR_NONE;
ExprParseArgs_t tExprArgs;
tExprArgs.m_pAttrType = &eAttrType;
CSphString sError; // ignored
ISphExpr * pExpr = sphExprParse ( m_szTok, m_tSchema, m_pJoinArgs ? &(m_pJoinArgs->m_sIndex2) : nullptr, sError, tExprArgs );
if ( !pExpr )
return;
m_eAttrType = eAttrType;
m_tExtraExpr.m_pExpr = pExpr;
m_tExtraExpr.m_tKey = JsonKey_t ( m_szTok, (int) strlen(m_szTok) );
m_tExtraExpr.m_eType = m_eAttrType;
m_tExtraExpr.m_tKey.m_uMask = 0;
m_iAttr = 0; // will be remapped in SetupSortRemap
}
void SortStateSetup_c::SetupPrecalculatedJson()
{
if ( m_iAttr>=0 )
return;
// try precalculated json fields/columnar attrs received from agents (prefixed with @int_*)
CSphString sName;
sName.SetSprintf ( "%s%s", GetInternalAttrPrefix(), m_szTok );
m_iAttr = m_tSchema.GetAttrIndex ( sName.cstr() );
}
bool SortStateSetup_c::Setup ( CSphString & sError )
{
if ( SetupSortByRelevance() )
return true;
UnifyInternalAttrNames();
if ( !CheckOrderByMva(sError) )
return false;
FindAliasedGroupby();
FindAliasedSortby();
if ( !SetupColumnar(sError) )
return false;
if ( !SetupJson(sError) )
return false;
SetupJsonConversions();
SetupPrecalculatedJson();
if ( m_iAttr<0 )
{
sError.SetSprintf ( "sort-by attribute '%s' not found", m_szTok );
return false;
}
const CSphColumnInfo & tCol = m_tSchema.GetAttr(m_iAttr);
m_tState.m_eKeypart[m_iField] = Attr2Keypart_ ( m_eAttrType!=SPH_ATTR_NONE ? m_eAttrType : tCol.m_eAttrType );
m_tState.m_tLocator[m_iField] = tCol.m_tLocator;
m_tState.m_dAttrs[m_iField] = m_iAttr;
return true;
}
//////////////////////////////////////////////////////////////////////////
ESortClauseParseResult sphParseSortClause ( const CSphQuery & tQuery, const char * szClause, const ISphSchema & tSchema, ESphSortFunc & eFunc, CSphMatchComparatorState & tState, CSphVector<ExtraSortExpr_t> & dExtraExprs, bool bComputeItems, const JoinArgs_t * pJoinArgs, CSphString & sError )
{
for ( auto & tAttr : tState.m_dAttrs )
tAttr = -1;
dExtraExprs.Resize ( tState.MAX_ATTRS );
// mini parser
SortClauseTokenizer_c tTok(szClause);
bool bField = false; // whether we are expecting a field name (vs. a sort order) next
int iField = 0;
for ( const char * pTok=tTok.GetToken(); pTok; pTok=tTok.GetToken() )
{
bField = !bField;
// special case, sort by random
if ( iField==0 && bField && strcmp ( pTok, "@random" )==0 )
return SORT_CLAUSE_RANDOM;
// handle sort order
if ( !bField )
{
// check
if ( strcmp ( pTok, "desc" ) && strcmp ( pTok, "asc" ) )
{
sError.SetSprintf ( "invalid sorting order '%s'", pTok );
return SORT_CLAUSE_ERROR;
}
// set
if ( !strcmp ( pTok, "desc" ) )
tState.m_uAttrDesc |= ( 1<<iField );
iField++;
continue;
}
// handle attribute name
if ( iField==CSphMatchComparatorState::MAX_ATTRS )
{
sError.SetSprintf ( "too many sort-by attributes; maximum count is %d", CSphMatchComparatorState::MAX_ATTRS );
return SORT_CLAUSE_ERROR;
}
SortStateSetup_c tSetup ( pTok, tTok, tState, dExtraExprs, iField, tSchema, tQuery, pJoinArgs );
if ( !tSetup.Setup(sError) )
return SORT_CLAUSE_ERROR;
}
// fix for
// FACET attr ORDER BY COUNT(*)
// into
// FACET attr ORDER BY COUNT(*) DESC
if ( iField==0 && tQuery.m_bFacet )
{
tState.m_uAttrDesc |= 1;
iField++;
}
if ( iField==0 )
{
sError.SetSprintf ( "no sort order defined" );
return SORT_CLAUSE_ERROR;
}
switch ( iField )
{
case 1: eFunc = FUNC_GENERIC1; break;
case 2: eFunc = FUNC_GENERIC2; break;
case 3: eFunc = FUNC_GENERIC3; break;
case 4: eFunc = FUNC_GENERIC4; break;
case 5: eFunc = FUNC_GENERIC5; break;
default: sError.SetSprintf ( "INTERNAL ERROR: %d fields in sphParseSortClause()", iField ); return SORT_CLAUSE_ERROR;
}
return SORT_CLAUSE_OK;
}
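// Usage sketch (hypothetical attribute names, for illustration): parsing the clause
// "price desc, quantity asc" returns SORT_CLAUSE_OK with eFunc=FUNC_GENERIC2 and
// bit 0 of tState.m_uAttrDesc set, since only the first of the two fields is descending.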
| 13,346 | C++ | .cpp | 432 | 28.270833 | 292 | 0.669818 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,871 | fileutils.cpp | manticoresoftware_manticoresearch/src/fileutils.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "fileutils.h"
#include "sphinxint.h"
#include "std/crc32.h"
#if _WIN32
#define getcwd _getcwd
#include <shlwapi.h>
#pragma comment(linker, "/defaultlib:ShLwApi.Lib")
#pragma message("Automatically linking with ShLwApi.Lib")
#else
#include <glob.h>
#endif
// whether to collect IO stats
static bool g_bCollectIOStats = false;
static thread_local CSphIOStats* g_pTlsIOStats;
CSphIOStats::~CSphIOStats ()
{
Stop();
}
void CSphIOStats::Start()
{
if ( !g_bCollectIOStats )
return;
m_pPrev = g_pTlsIOStats;
g_pTlsIOStats = this;
m_bEnabled = true;
}
void CSphIOStats::Stop()
{
if ( !g_bCollectIOStats )
return;
m_bEnabled = false;
g_pTlsIOStats = m_pPrev;
}
void CSphIOStats::Add ( const CSphIOStats & b )
{
m_iReadTime += b.m_iReadTime;
m_iReadOps += b.m_iReadOps;
m_iReadBytes += b.m_iReadBytes;
m_iWriteTime += b.m_iWriteTime;
m_iWriteOps += b.m_iWriteOps;
m_iWriteBytes += b.m_iWriteBytes;
}
void SafeClose ( int& iFD )
{
if ( iFD >= 0 )
::close ( iFD );
iFD = -1;
}
//////////////////////////////////////////////////////////////////////////
#if _WIN32
bool sphLockEx ( int iFile, bool bWait )
{
HANDLE hHandle = (HANDLE)_get_osfhandle ( iFile );
if ( hHandle != INVALID_HANDLE_VALUE )
{
OVERLAPPED tOverlapped;
memset ( &tOverlapped, 0, sizeof ( tOverlapped ) );
return !!LockFileEx ( hHandle, LOCKFILE_EXCLUSIVE_LOCK | ( bWait ? 0 : LOCKFILE_FAIL_IMMEDIATELY ), 0, 1, 0, &tOverlapped );
}
return false;
}
void sphLockUn ( int iFile )
{
HANDLE hHandle = (HANDLE)_get_osfhandle ( iFile );
if ( hHandle != INVALID_HANDLE_VALUE )
{
OVERLAPPED tOverlapped;
memset ( &tOverlapped, 0, sizeof ( tOverlapped ) );
UnlockFileEx ( hHandle, 0, 1, 0, &tOverlapped );
}
}
#else
bool sphLockEx ( int iFile, bool bWait )
{
struct flock tLock;
tLock.l_type = F_WRLCK;
tLock.l_whence = SEEK_SET;
tLock.l_start = 0;
tLock.l_len = 0;
if ( !bWait )
return ( fcntl ( iFile, F_SETLK, &tLock ) != -1 );
#if HAVE_F_SETLKW
return ( fcntl ( iFile, F_SETLKW, &tLock ) != -1 );
#else
for ( ;; )
{
int iResult = fcntl ( iFile, F_SETLK, &tLock );
if ( iResult != -1 )
return true;
if ( errno == EACCES || errno == EAGAIN )
sphSleepMsec ( 10 );
else
return false;
}
#endif
}
void sphLockUn ( int iFile )
{
struct flock tLock;
tLock.l_type = F_UNLCK;
tLock.l_whence = SEEK_SET;
tLock.l_start = 0;
tLock.l_len = 0;
if ( fcntl ( iFile, F_SETLK, &tLock ) == -1 )
sphWarning ( "sphLockUn: failed fcntl. Error: %d '%s'", errno, strerror ( errno ) );
}
#endif
bool RawFileLock ( const CSphString & sFile, int & iLockFD, CSphString & sError )
{
if ( iLockFD<0 )
{
iLockFD = ::open ( sFile.cstr (), SPH_O_NEW, 0644 );
if ( iLockFD<0 )
{
sError.SetSprintf ( "failed to open '%s': %u '%s'", sFile.cstr (), errno, strerrorm ( errno ) );
sphLogDebug ( "failed to open '%s': %u '%s'", sFile.cstr (), errno, strerrorm ( errno ) );
return false;
}
}
if ( !sphLockEx ( iLockFD, false ) )
{
sError.SetSprintf ( "failed to lock '%s': %u '%s'", sFile.cstr (), errno, strerrorm ( errno ) );
SafeClose ( iLockFD );
return false;
}
sphLogDebug ( "lock %s success", sFile.cstr () );
return true;
}
void RawFileUnLock ( const CSphString& sFile, int& iLockFD )
{
if ( iLockFD < 0 )
return;
sphLogDebug ( "File ID ok, closing lock FD %d, unlinking %s", iLockFD, sFile.cstr() );
sphLockUn ( iLockFD );
SafeClose ( iLockFD );
::unlink ( sFile.cstr() );
}
//////////////////////////////////////////////////////////////////////////
CSphIOStats * GetIOStats()
{
if ( !g_bCollectIOStats )
return nullptr;
CSphIOStats * pIOStats = g_pTlsIOStats;
if ( !pIOStats || !pIOStats->IsEnabled() )
return nullptr;
return pIOStats;
}
//////////////////////////////////////////////////////////////////////////
bool CSphSavedFile::Collect ( const char * szFilename, CSphString * pError )
{
if ( !szFilename || !*szFilename )
{
m_sFilename.SetBinary ( nullptr, 0 );
m_uSize = m_uCTime = m_uMTime = 0;
m_uCRC32 = 0;
return true;
}
m_sFilename = szFilename;
struct_stat tStat = {0};
if ( stat ( szFilename, &tStat ) < 0 )
{
if ( pError )
*pError = strerrorm ( errno );
return false;
}
m_uSize = tStat.st_size;
m_uCTime = tStat.st_ctime;
m_uMTime = tStat.st_mtime;
DWORD uCRC32 = 0;
if ( !sphCalcFileCRC32 ( szFilename, uCRC32 ) )
return false;
m_uCRC32 = uCRC32;
return true;
}
static void ReadSavedFile ( CSphSavedFile & tFile, const char * szFilename, bool bSharedStopwords, CSphString * sWarning )
{
tFile.m_sFilename = szFilename;
CSphString sName ( szFilename );
if ( !sName.IsEmpty() && sWarning )
{
if ( !sphIsReadable ( sName ) && bSharedStopwords )
{
StripPath ( sName );
sName.SetSprintf ( "%s/stopwords/%s", GET_FULL_SHARE_DIR (), sName.cstr() );
}
struct_stat tFileInfo;
if ( stat ( sName.cstr(), &tFileInfo ) < 0 )
{
if ( bSharedStopwords )
sWarning->SetSprintf ( "failed to stat either %s or %s: %s", szFilename, sName.cstr(), strerrorm(errno) );
else
sWarning->SetSprintf ( "failed to stat %s: %s", szFilename, strerrorm(errno) );
}
else
{
DWORD uMyCRC32 = 0;
if ( !sphCalcFileCRC32 ( sName.cstr(), uMyCRC32 ) )
{
sWarning->SetSprintf ( "failed to calculate CRC32 for %s", sName.cstr() );
} else
{
if ( uMyCRC32!=tFile.m_uCRC32 || tFileInfo.st_size!=tFile.m_uSize )
sWarning->SetSprintf ( "'%s' differs from the original", sName.cstr() );
}
}
}
}
void CSphSavedFile::Read ( CSphReader & tReader, const char * szFilename, bool bSharedStopwords, CSphString * sWarning )
{
m_uSize = tReader.GetOffset ();
m_uCTime = tReader.GetOffset ();
m_uMTime = tReader.GetOffset ();
m_uCRC32 = tReader.GetDword ();
ReadSavedFile ( *this, szFilename, bSharedStopwords, sWarning );
}
void CSphSavedFile::Read ( const bson::Bson_c& tNode, const char* szFilename, bool bSharedStopwords, CSphString* sWarning )
{
using namespace bson;
m_uSize = Int ( tNode.ChildByName ( "size" ) );
m_uCTime = Int ( tNode.ChildByName ( "ctime" ) );
m_uMTime = Int ( tNode.ChildByName ( "mtime" ) );
m_uCRC32 = Int ( tNode.ChildByName ( "crc32" ) );
ReadSavedFile ( *this, szFilename, bSharedStopwords, sWarning );
}
//////////////////////////////////////////////////////////////////////////
bool sphCalcFileCRC32 ( const char * szFilename, DWORD & uCRC32 )
{
uCRC32 = 0;
if ( !szFilename )
return false;
FILE * pFile = fopen ( szFilename, "rb" );
if ( !pFile )
return false;
const int BUFFER_SIZE = 131072;
CSphFixedVector<BYTE> dBuffer (BUFFER_SIZE);
int iBytesRead;
while ( ( iBytesRead = (int)fread ( dBuffer.begin(), 1, BUFFER_SIZE, pFile ) ) != 0 )
uCRC32 = sphCRC32 ( dBuffer.begin(), iBytesRead, uCRC32 );
fclose ( pFile );
return true;
}
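// Note: the file is read in 128 KB chunks and the CRC32 is accumulated across chunks;
// an empty (but openable) file therefore yields uCRC32==0 and a 'true' return.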
bool sphIsReadable ( const char * sPath, CSphString * pError )
{
int iFD = ::open ( sPath, SPH_O_READ );
if ( iFD<0 )
{
if ( pError )
pError->SetSprintf ( "%s unreadable: %s", sPath, strerror(errno) );
return false;
}
close ( iFD );
return true;
}
bool sphIsReadable ( const CSphString & sPath, CSphString * pError )
{
return sphIsReadable ( sPath.cstr(), pError );
}
bool sphFileExists ( const char * szFilename, CSphString * pError )
{
struct_stat st = {0};
if( stat( szFilename, &st ) != 0 )
{
if ( pError )
pError->SetSprintf ( "cannot access %s", szFilename );
return false;
}
else if ( st.st_mode & S_IFDIR )
{
if ( pError )
pError->SetSprintf ( "%s is not a file", szFilename );
return false;
}
return true;
}
bool sphFileExists ( const CSphString& sFilename, CSphString* pError )
{
return sphFileExists ( sFilename.cstr(), pError );
}
bool sphDirExists ( const char * szFilename, CSphString * pError )
{
struct_stat st = {0};
if( stat( szFilename, &st ) != 0 )
{
if ( pError )
pError->SetSprintf ( "cannot access %s", szFilename );
return false;
}
else if ( !(st.st_mode & S_IFDIR) )
{
if ( pError )
pError->SetSprintf ( "%s is not a directory", szFilename );
return false;
}
return true;
}
bool sphDirExists ( const CSphString& sFilename, CSphString* pError )
{
return sphDirExists ( sFilename.cstr(), pError );
}
int sphOpenFile ( const char * sFile, CSphString & sError, bool bWrite )
{
int iFlags = bWrite ? O_RDWR : SPH_O_READ;
int iFD = ::open ( sFile, iFlags, 0644 );
if ( iFD<0 )
{
sError.SetSprintf ( "failed to open file '%s': '%s'", sFile, strerror(errno) );
return -1;
}
return iFD;
}
int64_t sphGetFileSize ( int iFD, CSphString * sError )
{
if ( iFD<0 )
{
if ( sError )
sError->SetSprintf ( "invalid descriptor to fstat '%d'", iFD );
return -1;
}
struct_stat st;
if ( fstat ( iFD, &st )<0 )
{
if ( sError )
sError->SetSprintf ( "failed to fstat file '%d': '%s'", iFD, strerror(errno) );
return -1;
}
return st.st_size;
}
int64_t sphGetFileSize ( const CSphString& sFile, CSphString * sError )
{
struct_stat st = {0};
if ( stat ( sFile.cstr(), &st )<0 )
{
if ( sError )
sError->SetSprintf ( "failed to stat file '%s': '%s'", sFile.cstr(), strerror ( errno ) );
return -1;
}
return st.st_size;
}
bool sphTruncate ( int iFD )
{
#if _WIN32
return SetEndOfFile ( (HANDLE) _get_osfhandle(iFD) )!=0;
#else
auto iPos = ::lseek ( iFD, 0, SEEK_CUR );
if ( iPos>0 )
return ::ftruncate ( iFD, iPos )==0;
sphWarning ( "sphTruncate: failed seek. Error: %d '%s'", errno, strerrorm ( errno ) );
return false;
#endif
}
void sphInitIOStats()
{
g_bCollectIOStats = true;
}
void sphDoneIOStats()
{
g_bCollectIOStats = false;
}
static bool IsSlash ( char c )
{
return c=='/' || c=='\\';
}
CSphString sphNormalizePath( const CSphString & sOrigPath )
{
CSphVector<Str_t> dChunks;
const char * szBegin = sOrigPath.scstr();
const char * szEnd = szBegin + sOrigPath.Length();
const char * szPath = szBegin;
int iLevel = 0;
while ( szPath<szEnd )
{
const char * szSlash = szEnd;
for ( const char * p = szPath; p < szEnd; p++ )
if ( IsSlash(*p) )
{
szSlash = p;
break;
}
auto iChunkLen = szSlash - szPath;
switch ( iChunkLen )
{
case 0: // empty chunk skipped
++szPath;
continue;
case 1: // simple dot chunk skipped
if ( *szPath=='.' )
{
szPath += 2;
continue;
}
break;
case 2: // double dot abandons chunks, then decrease level
if ( szPath[0]=='.' && szPath[1]=='.' )
{
if ( dChunks.IsEmpty())
--iLevel;
else
dChunks.Pop();
szPath += 3;
continue;
}
break;
default:
break;
}
dChunks.Add( { szPath, iChunkLen } );
szPath = szSlash + 1;
}
StringBuilder_c sResult( "/" );
if ( *szBegin=='/' )
sResult.AppendRawChunk ( {"/", 1} );
else
while ( iLevel++<0 )
sResult << "..";
for ( const auto & dChunk : dChunks )
sResult.AppendChunk(dChunk);
return sResult.cstr();
}
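// Hedged examples of the normalization above (assuming POSIX-style separators):
// "a/b/../c" -> "a/c", "a/../.." -> "..", and a leading slash is preserved,
// e.g. "/a/.." -> "/".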
CSphString sphGetCwd()
{
CSphFixedVector<char> sBuf ( 65536 );
return getcwd ( sBuf.begin(), sBuf.GetLength() );
}
int64_t sphRead ( int iFD, void * pBuf, size_t iCount )
{
CSphIOStats * pIOStats = GetIOStats();
int64_t tmStart = 0;
if ( pIOStats )
tmStart = sphMicroTimer();
int64_t iRead = ::read ( iFD, pBuf, (int) iCount );
if ( pIOStats )
{
pIOStats->m_iReadTime += sphMicroTimer() - tmStart;
pIOStats->m_iReadOps++;
pIOStats->m_iReadBytes += (-1==iRead) ? 0 : iCount;
}
return iRead;
}
bool sphWrite ( int iFD, const void * pBuf, size_t iSize )
{
return ( iSize==(size_t)::write ( iFD, pBuf, (int) iSize ) );
}
bool sphWrite ( int iFD, const Str_t & dBuf )
{
return ( dBuf.second==(int) ::write ( iFD, dBuf.first, (int) dBuf.second) );
}
#if _WIN32
static void AddFile ( StrVec_t & dFilesFound, const CSphString & sPath, const WIN32_FIND_DATA & tFFData, bool bNeedDirs )
{
bool bDir = !!( tFFData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY );
if ( bDir && !bNeedDirs )
return;
if ( sPath.IsEmpty() )
dFilesFound.Add ( tFFData.cFileName );
else
dFilesFound.Add().SetSprintf ( "%s%s", sPath.cstr(), tFFData.cFileName );
}
#endif
StrVec_t FindFiles ( const char * szPath, bool bNeedDirs )
{
StrVec_t dFilesFound;
#if _WIN32
WIN32_FIND_DATA tFFData;
const char * szLastSlash = NULL;
for ( const char * s = szPath; *s; s++ )
if ( *s=='/' || *s=='\\' )
szLastSlash = s;
CSphString sPath;
if ( szLastSlash )
{
sPath = szPath;
sPath = sPath.SubString ( 0, szLastSlash - szPath + 1 );
}
HANDLE hFind = FindFirstFile ( szPath, &tFFData );
if ( hFind!=INVALID_HANDLE_VALUE )
{
AddFile ( dFilesFound, sPath, tFFData, bNeedDirs );
while ( FindNextFile ( hFind, &tFFData )!=0 )
AddFile ( dFilesFound, sPath, tFFData, bNeedDirs );
FindClose(hFind);
}
#else
glob_t tGlob;
glob ( szPath, GLOB_MARK | GLOB_NOSORT, NULL, &tGlob );
if ( tGlob.gl_pathv )
for ( int i = 0; i < (int)tGlob.gl_pathc; i++ )
{
const char * szPathName = tGlob.gl_pathv[i];
if ( !szPathName )
continue;
size_t iLen = strlen ( szPathName );
if ( !iLen || ( !bNeedDirs && szPathName[iLen-1]=='/' ) )
continue;
dFilesFound.Add ( szPathName );
}
globfree ( &tGlob );
#endif
dFilesFound.Uniq();
return dFilesFound;
}
bool MkDir ( const char * szDir )
{
if ( sphDirExists ( szDir ) )
return true;
#if _WIN32
if ( mkdir ( szDir ) )
#else
if ( mkdir ( szDir, S_IRWXU ) )
#endif
return false;
return true;
}
bool CopyFile ( const CSphString & sSource, const CSphString & sDest, CSphString & sError, int iMode )
{
const int BUFFER_SIZE = 1048576;
CSphFixedVector<BYTE> dBuffer(BUFFER_SIZE);
CSphAutofile tSource;
int iSrcFD = tSource.Open ( sSource, SPH_O_READ, sError );
if ( iSrcFD<0 )
return false;
CSphAutofile tDest;
int iDstFD = tDest.Open ( sDest, iMode, sError );
if ( iDstFD<0 )
return false;
int64_t iRead = 0;
while ( ( iRead = sphRead ( iSrcFD, dBuffer.Begin(), dBuffer.GetLength() ) ) > 0 )
{
if ( !sphWrite ( iDstFD, dBuffer.Begin(), iRead ) )
{
iRead = -1;
break;
}
}
if ( iRead<0 )
{
sError.SetSprintf ( "Unable to copy file '%s' to '%s': %s", sSource.cstr(), sDest.cstr(), strerrorm(errno) );
return false;
}
return true;
}
bool RenameFiles ( const StrVec_t & dSrc, const StrVec_t & dDst, CSphString & sError )
{
bool bError = false;
ARRAY_FOREACH ( i, dSrc )
{
if ( sph::rename ( dSrc[i].cstr(), dDst[i].cstr() ) )
{
sError.SetSprintf ( "failed to rename %s to %s", dSrc[i].cstr(), dDst[i].cstr() );
bError = true;
}
}
return bError;
}
bool RenameWithRollback ( const StrVec_t & dSrc, const StrVec_t & dDst, CSphString & sError )
{
assert ( dSrc.GetLength()==dDst.GetLength() );
if ( !dSrc.GetLength() )
return true;
CSphBitvec dRenamed ( dSrc.GetLength() );
bool bError = false;
ARRAY_FOREACH_COND ( i, dSrc, !bError )
{
if ( sph::rename ( dSrc[i].cstr(), dDst[i].cstr() ) )
{
sError.SetSprintf ( "failed to rename %s to %s", dSrc[i].cstr(), dDst[i].cstr() );
bError = true;
}
else
dRenamed.BitSet(i);
}
if ( !bError )
return true;
// roll back renaming
ARRAY_FOREACH ( i, dSrc )
{
if ( dRenamed.BitGet(i) )
sph::rename ( dDst[i].cstr(), dSrc[i].cstr() ); // ignore errors
}
return false;
}
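// Usage sketch: both vectors must be the same length and pair up positionally;
// on any failure the files already renamed are moved back, so the set of renames
// is applied all-or-nothing (rollback errors themselves are deliberately ignored).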
namespace sph
{
int rename ( const char * sOld, const char * sNew )
{
#if _WIN32
if ( MoveFileEx ( sOld, sNew, MOVEFILE_REPLACE_EXISTING ) )
return 0;
errno = GetLastError();
return -1;
#else
return ::rename ( sOld, sNew );
#endif
}
}
// check that the path exists and that the daemon can write there
bool CheckPath ( const CSphString & sPath, bool bCheckWrite, CSphString & sError, const char * sCheckFileName )
{
if ( !sphDirExists ( sPath.cstr(), &sError ) )
{
sError.SetSprintf ( "cannot access directory %s, %s", sPath.cstr(), sError.cstr() );
return false;
}
if ( bCheckWrite )
{
CSphString sTmp;
sTmp.SetSprintf ( "%s/%s", sPath.cstr(), sCheckFileName );
CSphAutofile tFile ( sTmp, SPH_O_NEW, sError, true );
if ( tFile.GetFD()<0 )
{
sError.SetSprintf ( "directory %s write error: %s", sPath.cstr(), sError.cstr() );
return false;
}
}
return true;
}
bool IsPathAbsolute ( const CSphString & sPath )
{
if ( !sPath.Length() )
return false;
#if _WIN32
return !PathIsRelative ( sPath.cstr() );
#else
return sPath.cstr() && IsSlash ( sPath.cstr()[0] );
#endif
}
CSphString & StripPath ( CSphString & sPath )
{
if ( sPath.IsEmpty() )
return sPath;
const char * s = sPath.cstr();
const char * sLastSlash = s;
for ( ; *s; ++s )
if ( IsSlash(*s) )
sLastSlash = s;
if ( !IsSlash ( *sLastSlash ) )
return sPath;
auto iPos = (int)( sLastSlash - sPath.cstr() + 1 );
auto iLen = (int)( s - sPath.cstr() );
sPath = sPath.SubString ( iPos, iLen - iPos );
return sPath;
}
CSphString GetPathOnly ( const CSphString & sFullPath )
{
if ( sFullPath.IsEmpty() )
return CSphString();
const char * pStart = sFullPath.cstr();
const char * pCur = pStart + sFullPath.Length() - 1;
if ( IsSlash(*pCur) )
return sFullPath;
while ( pCur>pStart && !IsSlash ( pCur[-1] ) )
pCur--;
CSphString sPath;
if ( pCur==pStart )
sPath = sFullPath;
else
sPath.SetBinary ( pStart, pCur-pStart );
return sPath;
}
const char * GetExtension ( const CSphString & sFullPath )
{
if ( sFullPath.IsEmpty() )
return nullptr;
const char * pDot = strchr ( sFullPath.cstr(), '.' );
if ( !pDot || pDot[1]=='\0' )
return nullptr;
return pDot+1;
}
CSphString RealPath ( const CSphString& sPath )
{
#if _WIN32
char szFullPath[_MAX_PATH];
if ( _fullpath( szFullPath, sPath.cstr(), _MAX_PATH ) )
{
char * pStart = szFullPath;
while ( *pStart )
{
if ( *pStart=='\\' )
*pStart = '/';
pStart++;
}
return szFullPath;
}
#else
char szPath[PATH_MAX];
auto szResult = realpath ( sPath.cstr(), szPath );
if ( szResult )
return szResult;
#endif
return sPath;
}
bool IsSymlink ( const CSphString & sFile )
{
#if _WIN32
DWORD uAttrs = GetFileAttributes ( sFile.cstr() );
if ( uAttrs==INVALID_FILE_ATTRIBUTES )
return false;
if ( !( uAttrs & FILE_ATTRIBUTE_REPARSE_POINT ) )
return false;
WIN32_FIND_DATA tFindData;
HANDLE hFind = FindFirstFile ( sFile.cstr(), &tFindData );
if ( hFind==INVALID_HANDLE_VALUE )
return false;
bool bSymlink = tFindData.dwReserved0==IO_REPARSE_TAG_SYMLINK;
FindClose(hFind);
return bSymlink;
#else
struct_stat tStat = {0};
if ( lstat ( sFile.cstr(), &tStat ) )
return false; // not found
return S_ISLNK(tStat.st_mode);
#endif
}
bool ResolveSymlink ( const CSphString & sFile, CSphString & sResult )
{
sResult = sFile;
#if _WIN32
HANDLE hFile = CreateFile ( sFile.cstr(), 0, 0, nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr );
if ( hFile==INVALID_HANDLE_VALUE )
return false;
CHAR szTargetPath[MAX_PATH];
DWORD uBytesRead = GetFinalPathNameByHandle ( hFile, szTargetPath, MAX_PATH, FILE_NAME_NORMALIZED );
CloseHandle(hFile); // close the handle on every path so the success branch does not leak it
if ( uBytesRead )
{
sResult.SetBinary ( szTargetPath, uBytesRead );
if ( sResult.Begins ( R"(\\?\)" ) )
sResult = sResult.SubString ( 4, sResult.Length()-4 );
return true;
}
return false;
#else
char szPath[PATH_MAX];
ssize_t tLen = ::readlink ( sFile.cstr(), szPath, sizeof(szPath)-1 );
if ( tLen!=-1 )
{
sResult = CSphString ( szPath, tLen );
return true;
}
return false;
#endif
}
CSphString GetExecutablePath()
{
#if _WIN32
HMODULE hModule = GetModuleHandle(NULL);
CHAR szPath[MAX_PATH];
GetModuleFileName ( hModule, szPath, MAX_PATH );
return szPath;
#else
char szPath[PATH_MAX];
ssize_t tLen;
tLen = ::readlink ( "/proc/self/exe", szPath, sizeof(szPath)-1 );
if ( tLen!=-1 )
return CSphString ( szPath, tLen );
tLen = ::readlink ( "/proc/curproc/file", szPath, sizeof(szPath)-1 );
if ( tLen!=-1 )
return CSphString ( szPath, tLen );
tLen = ::readlink ( "/proc/self/path/a.out", szPath, sizeof(szPath)-1 );
if ( tLen!=-1 )
return CSphString ( szPath, tLen );
return "";
#endif
}
void SeekAndPutOffset ( CSphWriter & tWriter, SphOffset_t tOffset, SphOffset_t tValue )
{
SphOffset_t tTotalSize = tWriter.GetPos();
// order matters here
tWriter.Flush(); // store collected data as SeekTo may get rid of buffer collected so far
tWriter.SeekTo(tOffset);
tWriter.PutOffset(tValue);
tWriter.SeekTo(tTotalSize);
}
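// Usage sketch (assumed pattern, for illustration): a writer typically reserves a
// slot with PutOffset(0) at position tOffset, streams the payload, then calls
// SeekAndPutOffset() to patch the slot with the now-known value and resume at the end.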
| 20,648 | C++ | .cpp | 781 | 24.033291 | 126 | 0.660956 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,872 | jsonsi.cpp | manticoresoftware_manticoresearch/src/jsonsi.cpp |
//
// Copyright (c) 2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "jsonsi.h"
#include "sphinxjson.h"
#include "sphinxint.h"
#include "secondarylib.h"
class JsonRowIterator_i
{
public:
virtual ~JsonRowIterator_i() = default;
virtual bool Setup ( CSphString & sError ) = 0;
virtual bool Next() = 0;
virtual const BYTE * GetJSON( const CSphAttrLocator & tLocator ) const = 0;
virtual RowID_t GetRowID() const = 0;
};
class JsonRowIterator_c : public JsonRowIterator_i
{
public:
JsonRowIterator_c ( const CSphRowitem * pPool, int64_t iNumRows, int iStride, const BYTE * pBlobPool );
bool Setup ( CSphString & sError ) override { return true; }
bool Next() override;
const BYTE * GetJSON ( const CSphAttrLocator & tLocator ) const override { return sphGetBlobAttr ( m_pPtr-m_iStride, tLocator, m_pBlobPool ).first; }
RowID_t GetRowID() const override { return m_tRowID-1; }
private:
const CSphRowitem * m_pEnd = nullptr;
const CSphRowitem * m_pPtr = nullptr;
RowID_t m_tRowID = 0;
int m_iStride = 0;
const BYTE * m_pBlobPool = nullptr;
};
JsonRowIterator_c::JsonRowIterator_c ( const CSphRowitem * pPool, int64_t iNumRows, int iStride, const BYTE * pBlobPool )
: m_pEnd ( pPool + ( iNumRows*iStride ) )
, m_pPtr ( pPool )
, m_iStride ( iStride )
, m_pBlobPool ( pBlobPool )
{}
bool JsonRowIterator_c::Next()
{
if ( m_pPtr>=m_pEnd )
return false;
m_pPtr += m_iStride;
m_tRowID++;
return true;
}
//////////////////////////////////////////////////////////////////////
class JsonRowIteratorFile_c : public JsonRowIterator_i
{
public:
JsonRowIteratorFile_c ( const CSphString & sOffsetFile, const CSphString & sSPB, int64_t iNumRows );
bool Setup ( CSphString & sError ) override;
bool Next() override;
const BYTE * GetJSON ( const CSphAttrLocator & tLocator ) const override { return sphGetBlobAttr ( m_dRow.Begin(), tLocator ).first; }
RowID_t GetRowID() const override { return m_tRowID-1; }
private:
CSphAutoreader m_tReaderOffset;
CSphAutoreader m_tReaderSPB;
CSphString m_sOffsetFile;
CSphString m_sSPBFile;
int64_t m_iNumRows = 0;
SphOffset_t m_tPrevOffset = 0;
RowID_t m_tRowID = 0;
CSphVector<BYTE> m_dRow;
};
JsonRowIteratorFile_c::JsonRowIteratorFile_c ( const CSphString & sOffsetFile, const CSphString & sSPB, int64_t iNumRows )
: m_sOffsetFile ( sOffsetFile )
, m_sSPBFile ( sSPB )
, m_iNumRows ( iNumRows )
{}
bool JsonRowIteratorFile_c::Setup ( CSphString & sError )
{
if ( !m_tReaderOffset.Open ( m_sOffsetFile, sError ) )
return false;
if ( !m_tReaderSPB.Open ( m_sSPBFile, sError ) )
return false;
m_tReaderSPB.SeekTo ( sizeof(SphOffset_t), -1 );
return true;
}
bool JsonRowIteratorFile_c::Next()
{
if ( m_tRowID>=m_iNumRows )
return false;
bool bDelta = !!m_tReaderOffset.GetByte();
SphOffset_t tOffset = m_tReaderOffset.UnzipOffset();
if ( bDelta )
tOffset += m_tPrevOffset;
SphOffset_t tSize = m_tReaderOffset.UnzipOffset();
m_dRow.Resize(tSize);
m_tReaderSPB.SeekTo ( tOffset, -1 );
m_tReaderSPB.Read ( m_dRow.Begin(), tSize );
m_tRowID++;
m_tPrevOffset = tOffset;
return true;
}
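// Note: each record in the offset file is a flag byte (1 = offset is a delta from the
// previous one, 0 = absolute), a zipped offset, and a zipped size; this matches what
// JsonSIBuilder_c::AddRowOffsetSize writes later in this file.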
//////////////////////////////////////////////////////////////////////
class JsonTypeDeductor_c
{
public:
void SubmitRow ( const CSphString & sAttr, const BYTE * pJson )
{
bson::Bson_c tBson ( { (BYTE*)pJson, 1 } );
ProcessJsonObj ( sAttr, tBson, [this]( const CSphString & sAttrName, const bson::NodeHandle_t & tNode )
{
StoredType_t * pStored = (*m_pTypes)(sAttrName);
ESphJsonType eType = pStored ? pStored->m_eType : JSON_EOF;
bool bTypeOk = DetermineNodeType ( eType, tNode );
if ( pStored )
{
bool bOldTypeOk = pStored->m_bTypeOk;
*pStored = { eType, 0, bOldTypeOk && bTypeOk };
}
else
m_pTypes->Add ( { eType, 0, bTypeOk }, sAttrName );
} );
}
common::Schema_t CreateSchema()
{
m_tSchema.resize(0);
for ( auto & i : *m_pTypes )
{
if ( !i.second.m_bTypeOk )
continue;
i.second.m_iAttr = m_tSchema.size();
common::StringHash_fn fnStringCalcHash = nullptr;
common::AttrType_e eColumnarType = ToColumnarType ( i.second.m_eType );
// fixme! make default collation configurable
if ( eColumnarType==common::AttrType_e::STRING )
fnStringCalcHash = LibcCIHash_fn::Hash;
m_tSchema.push_back ( { i.first.cstr(), eColumnarType, fnStringCalcHash } );
}
return m_tSchema;
}
void SetBuilder ( SI::Builder_i * pBuilder ) { m_pBuilder = pBuilder; }
void ConvertTypeAndStore ( const CSphString & sAttr, RowID_t tRowID, const BYTE * pJson )
{
assert(m_pBuilder);
m_pBuilder->SetRowID(tRowID);
bson::Bson_c tBson ( { (BYTE*)pJson, 1 } );
ProcessJsonObj ( sAttr, tBson, [this]( const CSphString & sAttrName, const bson::NodeHandle_t & tNode )
{
StoredType_t * pStored = (*m_pTypes)(sAttrName);
if ( pStored && pStored->m_bTypeOk )
ConvertAndStore ( tNode, pStored->m_eType, pStored->m_iAttr );
} );
}
private:
struct StoredType_t
{
ESphJsonType m_eType = JSON_EOF;
int m_iAttr = 0;
bool m_bTypeOk = true;
};
using AttrHash_c = CSphOrderedHash<StoredType_t, CSphString, CSphStrHashFunc, 16384>;
std::unique_ptr<AttrHash_c> m_pTypes { std::make_unique<AttrHash_c>() };
common::Schema_t m_tSchema;
SI::Builder_i * m_pBuilder = nullptr;
template <typename ACTION>
void ProcessJsonObj ( const CSphString & sAttrPrefix, const bson::NodeHandle_t & tNode, ACTION && tAction )
{
if ( tNode.second==JSON_EOF )
return;
if ( tNode.second!=JSON_ROOT )
tAction ( sAttrPrefix, tNode );
if ( tNode.second!=JSON_ROOT && tNode.second!=JSON_OBJECT )
return;
bson::Bson_c tBson(tNode);
tBson.ForEach ( [this,sAttrPrefix,tAction]( CSphString && sName, const bson::NodeHandle_t & tNode )
{
CSphString sAttrName;
sAttrName.SetSprintf ( "%s['%s']", sAttrPrefix.cstr(), sName.cstr() );
ProcessJsonObj ( sAttrName, tNode, tAction );
} );
}
void ConvertAndStore ( int64_t iValue, ESphJsonType eType, int iAttr )
{
switch ( eType )
{
case JSON_INT32:
case JSON_INT64:
case JSON_OBJECT:
m_pBuilder->SetAttr ( iAttr, iValue );
break;
case JSON_DOUBLE:
m_pBuilder->SetAttr ( iAttr, sphF2DW(float(iValue)) );
break;
case JSON_STRING:
{
char szBuf[64];
snprintf ( szBuf, 64, INT64_FMT, iValue );
m_pBuilder->SetAttr ( iAttr, (const uint8_t*)szBuf, strlen(szBuf) );
}
break;
default:
assert ( 0 && "Internal error on json type conversion");
break;
}
}
void ConvertAndStore ( double fValue, ESphJsonType eType, int iAttr )
{
switch ( eType )
{
case JSON_DOUBLE:
m_pBuilder->SetAttr ( iAttr, sphF2DW ( (float)fValue ) );
break;
case JSON_STRING:
{
char szBuf[64];
snprintf ( szBuf, 64, "%f", (float)fValue );
m_pBuilder->SetAttr ( iAttr, (const uint8_t*)szBuf, strlen(szBuf) );
}
break;
default:
assert ( 0 && "Internal error on json type conversion");
break;
}
}
void ConvertAndStore ( const CSphString & sValue, ESphJsonType eType, int iAttr )
{
switch ( eType )
{
case JSON_STRING:
m_pBuilder->SetAttr ( iAttr, (const uint8_t*)sValue.cstr(), sValue.Length() );
break;
default:
assert ( 0 && "Internal error on json type conversion");
break;
}
}
void ConvertAndStore ( const bson::NodeHandle_t & tNode, ESphJsonType eType, int iAttr )
{
switch ( tNode.second )
{
case JSON_INT32:
case JSON_INT64:
ConvertAndStore ( bson::Int(tNode), eType, iAttr );
break;
case JSON_DOUBLE:
ConvertAndStore ( bson::Double(tNode), eType, iAttr );
break;
case JSON_STRING:
ConvertAndStore ( bson::String(tNode), eType, iAttr );
break;
case JSON_STRING_VECTOR:
bson::ForEach ( tNode, [this,eType,iAttr]( const CSphString & sName, const bson::NodeHandle_t & tLocator )
{
ConvertAndStore ( bson::String(tLocator), eType, iAttr );
} );
break;
case JSON_INT32_VECTOR:
case JSON_INT64_VECTOR:
bson::ForEach ( tNode, [this,eType,iAttr]( const CSphString & sName, const bson::NodeHandle_t & tLocator )
{
ConvertAndStore ( bson::Int(tLocator), eType, iAttr );
} );
break;
case JSON_DOUBLE_VECTOR:
bson::ForEach ( tNode, [this,eType,iAttr]( const CSphString & sName, const bson::NodeHandle_t & tLocator )
{
ConvertAndStore ( bson::Double(tLocator), eType, iAttr );
} );
break;
case JSON_TRUE:
case JSON_FALSE:
case JSON_NULL:
ConvertAndStore ( bson::Int(tNode), eType, iAttr );
break;
case JSON_OBJECT:
ConvertAndStore ( int64_t(1), eType, iAttr );
break;
case JSON_MIXED_VECTOR:
break; // assume 0-length
default:
assert ( 0 && "Internal error: unsupported json type" );
break;
}
}
static common::AttrType_e ToColumnarType ( ESphJsonType eType )
{
switch ( eType )
{
case JSON_INT32: return common::AttrType_e::UINT32;
case JSON_INT64: return common::AttrType_e::INT64;
case JSON_DOUBLE: return common::AttrType_e::FLOAT;
case JSON_STRING: return common::AttrType_e::STRING;
default:
assert ( 0 && "Internal error: unsupported json type" );
return common::AttrType_e::NONE;
}
}
static ESphJsonType ToWidestType ( ESphJsonType ePrevType, ESphJsonType eNodeType )
{
assert ( ePrevType<=JSON_STRING && eNodeType<=JSON_STRING );
return Max ( ePrevType, eNodeType );
}
static bool DetermineNodeType ( ESphJsonType & eType, const bson::NodeHandle_t & tNode )
{
ESphJsonType eNodeType = tNode.second;
switch ( eNodeType )
{
case JSON_INT32:
case JSON_INT64:
case JSON_DOUBLE:
case JSON_STRING:
eType = ToWidestType ( eType, eNodeType );
break;
case JSON_TRUE:
case JSON_FALSE:
case JSON_NULL:
eType = ToWidestType ( eType, JSON_INT32 );
break;
case JSON_STRING_VECTOR:
eType = ToWidestType ( eType, JSON_STRING );
break;
case JSON_INT32_VECTOR:
eType = ToWidestType ( eType, JSON_INT32 );
break;
case JSON_INT64_VECTOR:
eType = ToWidestType ( eType, JSON_INT64 );
break;
case JSON_DOUBLE_VECTOR:
eType = ToWidestType ( eType, JSON_DOUBLE );
break;
case JSON_OBJECT:
eType = ToWidestType ( eType, JSON_INT32 );
break;
case JSON_MIXED_VECTOR:
{
const BYTE * p = tNode.first;
sphJsonUnpackInt(&p); // total len
if ( sphJsonUnpackInt(&p) )
return false;
// 0-length mixed vectors are supported
eType = ToWidestType ( eType, JSON_INT32 );
}
break;
default:
break;
}
return true;
}
};
////////////////////////////////////////////////////////////////////
template <typename CREATE_ITERATOR>
static bool BuildJsonSI ( const StrVec_t & dAttributes, const ISphSchema & tSchema, const CSphString & sFile, const CSphString & sTmpFile, CREATE_ITERATOR && fnCreateIterator, CSphString & sError )
{
CSphVector<CSphAttrLocator> dLocators;
for ( auto & i : dAttributes )
{
const CSphColumnInfo * pAttr = tSchema.GetAttr ( i.cstr() );
assert(pAttr);
dLocators.Add ( pAttr->m_tLocator );
}
JsonTypeDeductor_c tDeductor;
{
std::unique_ptr<JsonRowIterator_i> pIterator = std::unique_ptr<JsonRowIterator_i> ( fnCreateIterator(sError) );
while ( pIterator->Next() )
ARRAY_FOREACH( i, dAttributes )
tDeductor.SubmitRow ( dAttributes[i], pIterator->GetJSON ( dLocators[i] ) );
}
common::Schema_t tSISchema = tDeductor.CreateSchema();
// FIXME!!! pass these settings to the function, don't use default
BuildBufferSettings_t tSettings; // use default buffer settings
std::unique_ptr<SI::Builder_i> pBuilder = CreateSecondaryIndexBuilder ( tSISchema, tSettings.m_iSIMemLimit, sTmpFile, tSettings.m_iBufferStorage, sError );
if ( !pBuilder )
return false;
tDeductor.SetBuilder ( pBuilder.get() );
{
std::unique_ptr<JsonRowIterator_i> pIterator = std::unique_ptr<JsonRowIterator_i> ( fnCreateIterator(sError) );
while ( pIterator->Next() )
ARRAY_FOREACH( i, dAttributes )
tDeductor.ConvertTypeAndStore ( dAttributes[i], pIterator->GetRowID(), pIterator->GetJSON ( dLocators[i] ) );
}
std::string sErrorSTL;
if ( !pBuilder->Done(sErrorSTL) )
{
sError = sErrorSTL.c_str();
return false;
}
StrVec_t dSrc, dDst;
dSrc.Add(sTmpFile);
dDst.Add(sFile);
if ( !RenameWithRollback ( dSrc, dDst, sError ) )
return false;
return true;
}
////////////////////////////////////////////////////////////////////
class JsonSIBuilder_c : public JsonSIBuilder_i
{
public:
JsonSIBuilder_c ( const ISphSchema & tSchema, const CSphString & sSPB, const CSphString & sSIFile );
bool Setup ( CSphString & sError ) { return m_tWriter.OpenFile ( m_sTmpOffsetFile, sError ); }
void AddRowOffsetSize ( std::pair<SphOffset_t,SphOffset_t> tOffsetSize ) override;
bool Done ( CSphString & sError ) override;
private:
const ISphSchema & m_tSchema;
CSphString m_sSPB;
CSphString m_sSIFile;
CSphString m_sTmpOffsetFile;
CSphWriter m_tWriter;
SphOffset_t m_tPrevOffset = 0;
int64_t m_iNumRows = 0;
};
JsonSIBuilder_c::JsonSIBuilder_c ( const ISphSchema & tSchema, const CSphString & sSPB, const CSphString & sSIFile )
: m_tSchema ( tSchema )
, m_sSPB ( sSPB )
, m_sSIFile ( sSIFile )
{
m_sTmpOffsetFile.SetSprintf ( "%s.offset.tmp", m_sSIFile.cstr() );
}
void JsonSIBuilder_c::AddRowOffsetSize ( std::pair<SphOffset_t,SphOffset_t> tOffsetSize )
{
if ( tOffsetSize.first>=m_tPrevOffset )
{
m_tWriter.PutByte(1);
m_tWriter.ZipOffset ( tOffsetSize.first - m_tPrevOffset );
}
else
{
m_tWriter.PutByte(0);
m_tWriter.ZipOffset ( tOffsetSize.first );
}
m_tWriter.ZipOffset ( tOffsetSize.second );
m_tPrevOffset = tOffsetSize.first;
m_iNumRows++;
}
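// Worked example of the encoding above: offsets 100, 160, 40 are written as
// (1, zip(100)), (1, zip(60)), (0, zip(40)), each followed by its zipped size --
// a delta from the previous offset when non-decreasing, an absolute value otherwise.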
bool JsonSIBuilder_c::Done ( CSphString & sError )
{
m_tWriter.CloseFile();
StrVec_t dAttributes;
for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
if ( m_tSchema.GetAttr(i).IsIndexedSI() )
dAttributes.Add ( m_tSchema.GetAttr(i).m_sName );
CSphString sTmpSIFile;
sTmpSIFile.SetSprintf ( "%s.jsonsi.tmp", m_sSIFile.cstr() );
bool bOk = BuildJsonSI ( dAttributes, m_tSchema, m_sSIFile, sTmpSIFile, [this] ( CSphString & sError ) -> std::unique_ptr<JsonRowIterator_i>
{
std::unique_ptr<JsonRowIterator_i> pIterator = std::make_unique<JsonRowIteratorFile_c> ( m_sTmpOffsetFile, m_sSPB, m_iNumRows );
if ( !pIterator->Setup(sError) )
pIterator.reset();
return pIterator;
}, sError );
::unlink ( m_sTmpOffsetFile.cstr() );
return bOk;
}
////////////////////////////////////////////////////////////////////
bool BuildJsonSI ( const CSphString & sAttribute, const CSphRowitem * pPool, int64_t iNumRows, const ISphSchema & tSchema, const BYTE * pBlobPool, const CSphString & sFile, const CSphString & sTmpFile, CSphString & sError )
{
if ( sphFileExists(sFile) )
{
sError.SetSprintf ( "file '%s' already exists", sFile.cstr() );
return false;
}
if ( sAttribute==GetFixedJsonSIAttrName() )
{
sError.SetSprintf ( "secondary index attribute name '%s' not allowed", sAttribute.cstr() );
return false;
}
StrVec_t dAttributes;
dAttributes.Add(sAttribute);
int iStride = tSchema.GetRowSize();
return BuildJsonSI ( dAttributes, tSchema, sFile, sTmpFile, [pPool, iNumRows, iStride, pBlobPool] ( CSphString & sError ) -> std::unique_ptr<JsonRowIterator_i>
{
return std::make_unique<JsonRowIterator_c> ( pPool, iNumRows, iStride, pBlobPool );
}, sError );
}
std::unique_ptr<JsonSIBuilder_i> CreateJsonSIBuilder ( const ISphSchema & tSchema, const CSphString & sSPB, const CSphString & sSIFile, CSphString & sError )
{
auto pBuilder = std::make_unique<JsonSIBuilder_c> ( tSchema, sSPB, sSIFile );
if ( !pBuilder->Setup(sError) )
pBuilder.reset();
return pBuilder;
}
CSphString UnifyJsonFieldName ( const CSphString & sName )
{
enum class State_e
{
NONE,
KEY_NAME,
QUOTES
};
State_e eState = State_e::KEY_NAME;
const char * pStart = sName.cstr();
const char * pMax = pStart + sName.Length() + 1;
const char * p = pStart;
CSphString sRes;
while ( p < pMax )
{
switch ( eState )
{
case State_e::NONE:
switch ( *p )
{
case '[':
if ( *(p+1)=='\'' )
{
eState = State_e::QUOTES;
p++;
pStart = p+1;
}
break;
case '.':
eState = State_e::KEY_NAME;
pStart = p+1;
break;
default: break;
}
break;
case State_e::QUOTES:
if ( *p=='\'' && *(p+1)==']' )
{
eState = State_e::NONE;
CSphString sKey;
sKey.SetBinary( pStart, p-pStart );
sRes.SetSprintf ( "%s['%s']", sRes.cstr(), sKey.cstr() );
p++;
}
break;
case State_e::KEY_NAME:
if ( *p=='.' || *p=='[' || *p=='\0' )
{
eState = State_e::NONE;
CSphString sKey;
sKey.SetBinary( pStart, p-pStart );
if ( sRes.IsEmpty() )
sRes = sKey;
else
sRes.SetSprintf ( "%s['%s']", sRes.cstr(), sKey.cstr() );
p--;
}
break;
default:
break;
}
p++;
}
return sRes;
}
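// Hedged examples of the unification above: "a.b" -> "a['b']" and
// "a['b'].c" -> "a['b']['c']", i.e. dotted and bracketed JSON paths collapse
// to a single bracketed spelling.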
CSphString GetFixedJsonSIAttrName()
{
return "_extra_";
}
| 17,229 | C++ | .cpp | 547 | 28.440585 | 223 | 0.680022 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,873 | queryprofile.cpp | manticoresoftware_manticoresearch/src/queryprofile.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "queryprofile.h"
QueryProfile_c::QueryProfile_c()
{
Start ( SPH_QSTATE_TOTAL );
}
ESphQueryState QueryProfile_c::Switch ( ESphQueryState eNew )
{
int64_t tmNow = sphMicroTimer();
ESphQueryState eOld = m_eState;
m_dSwitches [ eOld ]++;
m_tmTotal [ eOld ] += tmNow - m_tmStamp;
m_eState = eNew;
m_tmStamp = tmNow;
return eOld;
}
void QueryProfile_c::Start ( ESphQueryState eNew )
{
memset ( m_dSwitches, 0, sizeof(m_dSwitches) );
memset ( m_tmTotal, 0, sizeof(m_tmTotal) );
m_eState = eNew;
m_tmStamp = sphMicroTimer();
m_iPseudoShards = 1;
m_iMaxMatches = 0;
}
void QueryProfile_c::AddMetric ( const QueryProfile_c & tData )
{
// fixme! maybe invent a way to display data from different profilers with some kind of multiplier?
for ( int i = 0; i<SPH_QSTATE_TOTAL; ++i )
{
m_dSwitches[i] += tData.m_dSwitches[i];
m_tmTotal[i] += tData.m_tmTotal[i];
}
}
/// stop profiling
void QueryProfile_c::Stop()
{
Switch ( SPH_QSTATE_TOTAL );
}
//////////////////////////////////////////////////////////////////////////
CSphScopedProfile::CSphScopedProfile ( QueryProfile_c * pProfile, ESphQueryState eNewState )
{
m_pProfile = pProfile;
m_eOldState = SPH_QSTATE_UNKNOWN;
if ( m_pProfile )
m_eOldState = m_pProfile->Switch ( eNewState );
}
CSphScopedProfile::~CSphScopedProfile()
{
if ( m_pProfile )
m_pProfile->Switch ( m_eOldState );
}
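// Usage sketch (state name chosen for illustration):
// CSphScopedProfile tProf ( pProfile, SPH_QSTATE_SORT );
// switches the profiler into the given state for the current scope and restores the
// previous state on destruction, so nested timed sections compose safely.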
| 1,848 | C++ | .cpp | 62 | 28.080645 | 94 | 0.695824 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,874 | secondaryindex.cpp | manticoresoftware_manticoresearch/src/secondaryindex.cpp |
//
//
// Copyright (c) 2018-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "secondaryindex.h"
#include <algorithm>
#include "histogram.h"
#include "sphinxint.h"
#include "killlist.h"
#include "attribute.h"
#include "columnarfilter.h"
#include <queue>
#include "util/util.h"
#include "secondarylib.h"
//////////////////////////////////////////////////////////////////////////
bool ReturnIteratorResult ( RowID_t * pRowID, RowID_t * pRowIdStart, RowIdBlock_t & dRowIdBlock )
{
if ( pRowID==pRowIdStart )
return false;
dRowIdBlock = RowIdBlock_t(pRowIdStart, pRowID-pRowIdStart);
return true;
}
//////////////////////////////////////////////////////////////////////////
class SecondaryIndexIterator_c : public RowidIterator_i
{
protected:
static const int MAX_COLLECTED = 1024;
CSphFixedVector<RowID_t> m_dCollected {MAX_COLLECTED};
};
//////////////////////////////////////////////////////////////////////////
static RowIdBlock_t DoRowIdFiltering ( const RowIdBlock_t & dRowIdBlock, const RowIdBoundaries_t & tBoundaries, CSphVector<RowID_t> & dCollected )
{
RowID_t tMinSpanRowID = dRowIdBlock.First();
RowID_t tMaxSpanRowID = dRowIdBlock.Last();
if ( tMaxSpanRowID < tBoundaries.m_tMinRowID || tMinSpanRowID > tBoundaries.m_tMaxRowID )
return {};
dCollected.Resize ( dRowIdBlock.GetLength() );
RowID_t * pRowIdStart = dCollected.Begin();
RowID_t * pRowID = pRowIdStart;
for ( auto i : dRowIdBlock )
{
if ( i>=tBoundaries.m_tMinRowID && i<=tBoundaries.m_tMaxRowID )
*pRowID++ = i;
}
return { pRowIdStart, pRowID-pRowIdStart };
}
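// Note: the block is skipped entirely when it lies outside [m_tMinRowID, m_tMaxRowID];
// otherwise the surviving rowids are compacted into dCollected and a span over that
// buffer is returned, so the result aliases dCollected until its next reuse.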
//////////////////////////////////////////////////////////////////////////
template <typename T, bool ROWID_LIMITS>
class IteratorState_T
{
public:
std::unique_ptr<T> m_pIterator;
const RowID_t * m_pRowID = nullptr;
const RowID_t * m_pRowIDMax = nullptr;
RowIdBoundaries_t m_tBoundaries;
IteratorState_T() = default;
IteratorState_T ( T * pIterator ) : m_pIterator(pIterator) {}
FORCE_INLINE bool RewindTo ( RowID_t tRowID );
FORCE_INLINE bool WarmupDocs();
FORCE_INLINE bool WarmupDocs ( RowID_t tRowID );
void Stop();
FORCE_INLINE bool IsStopped() const { return m_bStopped; }
static inline bool IsLess ( const IteratorState_T * pA, const IteratorState_T * pB ) { return ( *pA->m_pRowID < *pB->m_pRowID ); }
private:
bool m_bStopped = false;
CSphVector<RowID_t> m_dCollected;
};
template <typename T, bool ROWID_LIMITS>
bool IteratorState_T<T, ROWID_LIMITS>::WarmupDocs()
{
assert(m_pIterator);
assert ( !ROWID_LIMITS ); // we assume that underlying iterators do the filtering
RowIdBlock_t dRowIdBlock;
if ( !m_pIterator->GetNextRowIdBlock(dRowIdBlock) )
{
Stop();
return false;
}
m_pRowID = dRowIdBlock.Begin();
m_pRowIDMax = m_pRowID+dRowIdBlock.GetLength();
return true;
}
template <>
bool IteratorState_T<common::BlockIterator_i,true>::WarmupDocs()
{
assert(m_pIterator);
RowIdBlock_t dRowIdBlock;
do
{
util::Span_T<uint32_t> dSpan;
if ( !m_pIterator->GetNextRowIdBlock(dSpan) )
{
Stop();
return false;
}
RowID_t tMinSpanRowID = dSpan.front();
RowID_t tMaxSpanRowID = dSpan.back();
dRowIdBlock = { (RowID_t *)dSpan.begin(), (int64_t)dSpan.size() };
// we need additional filtering only on first and last blocks
// per-block filtering is performed inside MCL
if ( tMinSpanRowID < m_tBoundaries.m_tMinRowID || tMaxSpanRowID > m_tBoundaries.m_tMaxRowID )
dRowIdBlock = DoRowIdFiltering ( dRowIdBlock, m_tBoundaries, m_dCollected );
}
while ( !dRowIdBlock.GetLength() );
m_pRowID = (const RowID_t *)dRowIdBlock.begin();
m_pRowIDMax = (const RowID_t *)dRowIdBlock.end();
return true;
}
template <>
bool IteratorState_T<common::BlockIterator_i,false>::WarmupDocs()
{
assert(m_pIterator);
util::Span_T<uint32_t> dRowIdBlock;
if ( !m_pIterator->GetNextRowIdBlock(dRowIdBlock) )
{
Stop();
return false;
}
m_pRowID = (const RowID_t *)dRowIdBlock.begin();
m_pRowIDMax = (const RowID_t *)dRowIdBlock.end();
return true;
}
template <typename T, bool ROWID_LIMITS>
bool IteratorState_T<T,ROWID_LIMITS>::WarmupDocs ( RowID_t tRowID )
{
if ( !m_pIterator->HintRowID(tRowID) )
{
Stop();
return false;
}
return WarmupDocs();
}
template <typename T, bool ROWID_LIMITS>
void IteratorState_T<T,ROWID_LIMITS>::Stop()
{
m_pRowID = m_pRowIDMax = nullptr;
m_bStopped = true;
}
template <typename T, bool ROWID_LIMITS>
bool IteratorState_T<T,ROWID_LIMITS>::RewindTo ( RowID_t tRowID )
{
assert ( !m_bStopped );
while ( tRowID>*(m_pRowIDMax-1) )
{
if ( !WarmupDocs(tRowID) )
return false;
}
const RowID_t * pRowID = m_pRowID;
while ( true )
{
while ( pRowID < m_pRowIDMax && *pRowID < tRowID )
pRowID++;
if ( pRowID<m_pRowIDMax )
break;
if ( !WarmupDocs() )
return false;
pRowID = m_pRowID;
}
m_pRowID = pRowID;
assert(pRowID);
return true;
}
//////////////////////////////////////////////////////////////////////////
template <typename T, bool ROWID_LIMITS>
class RowidIterator_Base_T : public SecondaryIndexIterator_c
{
public:
RowidIterator_Base_T ( T ** ppIterators, int iNumIterators, const RowIdBoundaries_t * pBoundaries );
int64_t GetNumProcessed() const override;
protected:
using IteratorState_t = IteratorState_T<T,ROWID_LIMITS>;
CSphFixedVector<IteratorState_t> m_dIterators;
};
template <typename T, bool ROWID_LIMITS>
RowidIterator_Base_T<T,ROWID_LIMITS>::RowidIterator_Base_T ( T ** ppIterators, int iNumIterators, const RowIdBoundaries_t * pBoundaries )
: m_dIterators ( iNumIterators )
{
for ( int i = 0; i < iNumIterators; i++ )
{
m_dIterators[i] = ppIterators[i];
if ( pBoundaries )
m_dIterators[i].m_tBoundaries = *pBoundaries;
}
}
template <typename T, bool ROWID_LIMITS>
int64_t RowidIterator_Base_T<T,ROWID_LIMITS>::GetNumProcessed() const
{
int64_t iTotal = 0;
for ( auto & i : m_dIterators )
iTotal += i.m_pIterator->GetNumProcessed();
return iTotal;
}
//////////////////////////////////////////////////////////////////////////
template <typename T, bool ROWID_LIMITS>
class RowidIterator_Intersect_T : public RowidIterator_Base_T<T,ROWID_LIMITS>
{
using BASE = RowidIterator_Base_T<T,ROWID_LIMITS>;
public:
RowidIterator_Intersect_T ( T ** ppIterators, int iNumIterators, const RowIdBoundaries_t * pBoundaries = nullptr );
bool HintRowID ( RowID_t tRowID ) override;
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override;
void SetCutoff ( int iCutoff ) override { m_iRowsLeft = iCutoff; }
bool WasCutoffHit() const override { return !m_iRowsLeft; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override;
private:
int m_iRowsLeft = INT_MAX;
FORCE_INLINE bool AdvanceIterators();
FORCE_INLINE bool Advance ( int iIterator, RowID_t tRowID );
};
template <typename T, bool ROWID_LIMITS>
RowidIterator_Intersect_T<T,ROWID_LIMITS>::RowidIterator_Intersect_T ( T ** ppIterators, int iNumIterators, const RowIdBoundaries_t * pBoundaries )
: BASE ( ppIterators, iNumIterators, pBoundaries )
{
BASE::m_dIterators[0].WarmupDocs();
}
template <typename T, bool ROWID_LIMITS>
bool RowidIterator_Intersect_T<T,ROWID_LIMITS>::HintRowID ( RowID_t tRowID )
{
if ( BASE::m_dIterators[0].IsStopped() )
return false;
return BASE::m_dIterators[0].RewindTo(tRowID);
}
template <typename T, bool ROWID_LIMITS>
bool RowidIterator_Intersect_T<T,ROWID_LIMITS>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
RowID_t * pRowIdStart = BASE::m_dCollected.Begin();
RowID_t * pRowIdMax = pRowIdStart + Min ( BASE::m_dCollected.GetLength()-1, m_iRowsLeft );
RowID_t * pRowID = pRowIdStart;
auto & tFirst = BASE::m_dIterators[0];
// we assume that iterators are sorted from most selective to least selective
while ( pRowID<pRowIdMax )
{
if ( !tFirst.m_pRowID )
break;
if ( !AdvanceIterators() )
{
tFirst.Stop();
break;
}
*pRowID++ = *tFirst.m_pRowID;
tFirst.m_pRowID++;
if ( tFirst.m_pRowID>=tFirst.m_pRowIDMax && !tFirst.WarmupDocs() )
break;
}
if ( m_iRowsLeft!=INT_MAX )
{
m_iRowsLeft -= pRowID-pRowIdStart;
assert ( m_iRowsLeft>=0 );
}
return ReturnIteratorResult ( pRowID, pRowIdStart, dRowIdBlock );
}
template <typename T, bool ROWID_LIMITS>
bool RowidIterator_Intersect_T<T,ROWID_LIMITS>::AdvanceIterators()
{
auto & tFirst = BASE::m_dIterators[0];
RowID_t tMaxRowID = *tFirst.m_pRowID;
for ( int i=1; i < BASE::m_dIterators.GetLength(); i++ )
{
auto & tState = BASE::m_dIterators[i];
if ( !tState.m_pRowID && !tState.WarmupDocs(tMaxRowID) )
return false;
if ( *tState.m_pRowID==tMaxRowID )
continue;
if ( !tState.RewindTo(tMaxRowID) )
return false;
if ( *tState.m_pRowID>tMaxRowID )
{
if ( !tFirst.RewindTo( *tState.m_pRowID ) )
return false;
tMaxRowID = *tFirst.m_pRowID;
i = 0;
}
}
return true;
}
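// Note: this is a leapfrog-style intersection -- every other iterator is rewound to
// the first iterator's current rowid, and whenever one overshoots, the first iterator
// is rewound forward and the scan over the others restarts (i = 0), until all agree.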
template <typename T, bool ROWID_LIMITS>
void RowidIterator_Intersect_T<T,ROWID_LIMITS>::AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const
{
for ( const auto & i : BASE::m_dIterators )
i.m_pIterator->AddDesc(dDesc);
}
/////////////////////////////////////////////////////////////////////
template <typename T, bool ROWID_LIMITS>
class RowidIterator_Union_T : public RowidIterator_Base_T<T,ROWID_LIMITS>
{
using BASE = RowidIterator_Base_T<T,ROWID_LIMITS>;
public:
RowidIterator_Union_T ( T ** ppIterators, int iNumIterators, const RowIdBoundaries_t * pBoundaries = nullptr );
bool HintRowID ( RowID_t tRowID ) override;
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override;
void SetCutoff ( int iCutoff ) override { m_iRowsLeft = iCutoff; }
bool WasCutoffHit() const override { return !m_iRowsLeft; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override;
private:
CSphQueue<typename BASE::IteratorState_t*, typename BASE::IteratorState_t> m_tMerge;
int m_iRowsLeft = INT_MAX;
FORCE_INLINE void AdvanceQueue ( typename BASE::IteratorState_t * pState );
};
template <typename T, bool ROWID_LIMITS>
RowidIterator_Union_T<T,ROWID_LIMITS>::RowidIterator_Union_T ( T ** ppIterators, int iNumIterators, const RowIdBoundaries_t * pBoundaries )
: BASE ( ppIterators, iNumIterators, pBoundaries )
, m_tMerge ( iNumIterators )
{
for ( auto & i : BASE::m_dIterators )
if ( i.WarmupDocs() )
m_tMerge.Push(&i);
}
template <typename T, bool ROWID_LIMITS>
bool RowidIterator_Union_T<T,ROWID_LIMITS>::HintRowID ( RowID_t tRowID )
{
if ( !m_tMerge.GetLength() )
return false;
if ( tRowID<=*m_tMerge.Last()->m_pRowID )
return true;
m_tMerge.Clear();
for ( auto & i : BASE::m_dIterators )
if ( i.m_pRowID && i.RewindTo(tRowID) )
m_tMerge.Push(&i);
return m_tMerge.GetLength()>0;
}
template <typename T, bool ROWID_LIMITS>
void RowidIterator_Union_T<T,ROWID_LIMITS>::AdvanceQueue ( typename BASE::IteratorState_t * pState )
{
pState->m_pRowID++;
if ( pState->m_pRowID<pState->m_pRowIDMax || pState->WarmupDocs() )
m_tMerge.Push(pState);
}
template <typename T, bool ROWID_LIMITS>
bool RowidIterator_Union_T<T,ROWID_LIMITS>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
RowID_t * pRowIdStart = BASE::m_dCollected.Begin();
RowID_t * pRowIdMax = pRowIdStart + Min ( BASE::m_dCollected.GetLength()-1, m_iRowsLeft );
RowID_t * pRowID = pRowIdStart;
RowID_t tLastRowID = INVALID_ROWID;
while ( pRowID<pRowIdMax && m_tMerge.GetLength()>1 )
{
auto pState = m_tMerge.Root();
m_tMerge.Pop();
RowID_t tCurRowID = *pState->m_pRowID;
if ( tCurRowID!=tLastRowID ) // skip all items with the same row-id
*pRowID++ = tCurRowID;
tLastRowID = tCurRowID;
AdvanceQueue(pState);
}
if ( m_tMerge.GetLength()==1 )
{
auto pState = m_tMerge.Root();
do
{
while ( pRowID<pRowIdMax && pState->m_pRowID<pState->m_pRowIDMax )
*pRowID++ = *pState->m_pRowID++;
}
while ( pState->m_pRowID>=pState->m_pRowIDMax && pState->WarmupDocs() );
if ( !pState->m_pRowID )
m_tMerge.Pop();
}
if ( m_iRowsLeft!=INT_MAX )
{
m_iRowsLeft -= pRowID-pRowIdStart;
assert ( m_iRowsLeft>=0 );
}
return ReturnIteratorResult ( pRowID, pRowIdStart, dRowIdBlock );
}
template <typename T, bool ROWID_LIMITS>
void RowidIterator_Union_T<T,ROWID_LIMITS>::AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const
{
std::vector<common::IteratorDesc_t> dIteratorDesc;
BASE::m_dIterators[0].m_pIterator->AddDesc(dIteratorDesc);
dDesc.Add ( { dIteratorDesc[0].m_sAttr.c_str(), dIteratorDesc[0].m_sType.c_str() } );
}
template <>
void RowidIterator_Union_T<RowidIterator_i,true>::AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const
{
BASE::m_dIterators[0].m_pIterator->AddDesc(dDesc);
}
template <>
void RowidIterator_Union_T<RowidIterator_i,false>::AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const
{
BASE::m_dIterators[0].m_pIterator->AddDesc(dDesc);
}
/////////////////////////////////////////////////////////////////////
template <bool ROWID_LIMITS>
class RowidIterator_Wrapper_T : public RowidIterator_i
{
public:
RowidIterator_Wrapper_T ( common::BlockIterator_i * pIterator, const RowIdBoundaries_t * pBoundaries = nullptr );
bool HintRowID ( RowID_t tRowID ) override { return m_pIterator->HintRowID(tRowID); }
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override;
int64_t GetNumProcessed() const override { return m_pIterator->GetNumProcessed(); }
void SetCutoff ( int iCutoff ) override { m_pIterator->SetCutoff(iCutoff); }
bool WasCutoffHit() const override { return false; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override;
private:
std::unique_ptr<common::BlockIterator_i> m_pIterator;
RowIdBoundaries_t m_tBoundaries;
CSphVector<RowID_t> m_dCollected;
};
template <bool ROWID_LIMITS>
RowidIterator_Wrapper_T<ROWID_LIMITS>::RowidIterator_Wrapper_T ( common::BlockIterator_i * pIterator, const RowIdBoundaries_t * pBoundaries )
: m_pIterator ( pIterator )
{
if ( pBoundaries )
m_tBoundaries = *pBoundaries;
}
template <>
bool RowidIterator_Wrapper_T<true>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
do
{
util::Span_T<uint32_t> dSpan;
if ( !m_pIterator->GetNextRowIdBlock(dSpan) )
return false;
dRowIdBlock = { (RowID_t *)dSpan.begin(), (int64_t)dSpan.size() };
if ( !dSpan.size() )
return true;
RowID_t tMinSpanRowID = dSpan.front();
RowID_t tMaxSpanRowID = dSpan.back();
// we need additional filtering only on first and last blocks
// per-block filtering is performed inside MCL
if ( tMinSpanRowID < m_tBoundaries.m_tMinRowID || tMaxSpanRowID > m_tBoundaries.m_tMaxRowID )
dRowIdBlock = DoRowIdFiltering ( dRowIdBlock, m_tBoundaries, m_dCollected );
}
while ( !dRowIdBlock.GetLength() );
return true;
}
template <>
bool RowidIterator_Wrapper_T<false>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
util::Span_T<uint32_t> dSpan;
if ( !m_pIterator->GetNextRowIdBlock(dSpan) )
return false;
dRowIdBlock = { (RowID_t *)dSpan.begin(), (int64_t)dSpan.size() };
return true;
}
template <bool ROWID_LIMITS>
void RowidIterator_Wrapper_T<ROWID_LIMITS>::AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const
{
assert(m_pIterator);
std::vector<common::IteratorDesc_t> dIteratorDesc;
m_pIterator->AddDesc(dIteratorDesc);
for ( const auto & i : dIteratorDesc )
dDesc.Add ( { i.m_sAttr.c_str(), i.m_sType.c_str() } );
}
//////////////////////////////////////////////////////////////////////////
static bool NextSet ( CSphVector<int> & dSet, const CSphVector<SecondaryIndexInfo_t> & dSecondaryIndexes )
{
for ( int i = 0; i < dSet.GetLength(); i++ )
{
int iNumCapabilities = dSecondaryIndexes[i].m_dCapabilities.GetLength();
if ( !iNumCapabilities )
continue;
dSet[i]++;
if ( dSet[i] >= iNumCapabilities )
dSet[i] = 0;
else
return true;
}
return false;
}
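// NextSet() behaves like a mixed-radix odometer over the per-filter capability lists: a sketch of a
// trace (hypothetical sizes) with two filters whose capability lists have lengths {2,3} advances dSet
// through {0,0} -> {1,0} -> {0,1} -> {1,1} -> {0,2} -> {1,2} and then returns false on wrap-around,
// so all 2*3=6 combinations are visited exactly once; filters with empty capability lists are skipped.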
static SIDefault_e g_eSIState = SIDefault_e::ENABLED;
void SetSecondaryIndexDefault ( SIDefault_e eState )
{
g_eSIState = eState;
}
SIDefault_e GetSecondaryIndexDefault ()
{
return g_eSIState;
}
static bool CheckIndexHint ( const CSphFilterSettings & tFilter, const CSphVector<IndexHint_t> & dHints, SecondaryIndexType_e eType, bool & bForce )
{
bForce = false;
for ( const auto & i : dHints )
if ( i.m_sIndex==tFilter.m_sAttrName && i.m_eType==eType )
{
bForce = i.m_bForce;
return bForce;
}
return true;
}
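// Note on semantics (hedged - the hint syntax itself is parsed elsewhere, per the Manticore docs,
// e.g. /*+ SecondaryIndex(attr) */ or /*+ NO_SecondaryIndex(attr) */): a matching hint with
// m_bForce=true forces the capability, a matching hint with m_bForce=false (a NO_ hint) disables it,
// and no hint at all means "allowed but not forced", hence the fall-through above returns true.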
static bool HaveSI ( const CSphFilterSettings & tFilter, const SelectIteratorCtx_t & tCtx, bool & bForce )
{
if ( !tCtx.IsEnabled_SI(tFilter) )
return false;
if ( !CheckIndexHint ( tFilter, tCtx.m_tQuery.m_dIndexHints, SecondaryIndexType_e::INDEX, bForce ) )
return false;
if ( !IsSecondaryLibLoaded() || GetSecondaryIndexDefault()==SIDefault_e::DISABLED )
return false;
// force secondary indexes from non-default files (that would be JSON SI); at least for now
// the idea is that they should always be faster
auto * pAttr = tCtx.m_tIndexSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
if ( !pAttr || pAttr->m_eAttrType==SPH_ATTR_JSON )
bForce = true;
return true;
}
static bool HaveAnalyzer ( const CSphFilterSettings & tFilter, const SelectIteratorCtx_t & tCtx, bool & bForce )
{
if ( !tCtx.IsEnabled_Analyzer(tFilter) )
return false;
return CheckIndexHint ( tFilter, tCtx.m_tQuery.m_dIndexHints, SecondaryIndexType_e::ANALYZER, bForce );
}
static bool HaveLookup ( const CSphFilterSettings & tFilter, const CSphVector<IndexHint_t> & dHints, bool & bForce )
{
if ( tFilter.m_sAttrName!=sphGetDocidName() )
return false;
return CheckIndexHint ( tFilter, dHints, SecondaryIndexType_e::LOOKUP, bForce );
}
static void FetchHistogramInfo ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
if ( !tCtx.m_pHistograms )
return;
ARRAY_FOREACH ( i, tCtx.m_dFilters )
{
const CSphFilterSettings & tFilter = tCtx.m_dFilters[i];
const Histogram_i * pHistogram = tCtx.m_pHistograms->Get ( tFilter.m_sAttrName );
auto & tSIInfo = dSIInfo[i];
if ( !pHistogram )
{
auto * pAttr = tCtx.m_tIndexSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
tSIInfo.m_iTotalValues = tCtx.m_iTotalDocs;
tSIInfo.m_iRsetEstimate = tCtx.m_iTotalDocs;
tSIInfo.m_bUsable = !pAttr || pAttr->m_eAttrType==SPH_ATTR_JSON;
continue;
}
HistogramRset_t tEstimate;
tSIInfo.m_bUsable = pHistogram->EstimateRsetSize ( tFilter, tEstimate );
tSIInfo.m_iTotalValues = pHistogram->GetNumValues();
tSIInfo.m_iRsetEstimate = tSIInfo.m_bUsable ? tEstimate.m_iTotal : tSIInfo.m_iTotalValues;
tSIInfo.m_bHasHistograms = true;
}
}
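// A worked example (hypothetical numbers): with m_iTotalDocs=1000000 and a histogram estimating
// 10000 matching values for a filter, m_iRsetEstimate becomes 10000 and the CBO can weigh an index
// read of ~10k rows against a full scan; a filter whose attribute has no histogram pessimistically
// assumes the whole table matches (m_iRsetEstimate = m_iTotalDocs).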
static void MarkAvailableLookup ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
ARRAY_FOREACH ( i, tCtx.m_dFilters )
{
if ( !dSIInfo[i].m_bUsable )
continue;
bool bForce = false;
if ( !HaveLookup( tCtx.m_dFilters[i], tCtx.m_tQuery.m_dIndexHints, bForce ) )
continue;
dSIInfo[i].m_dCapabilities.Add ( SecondaryIndexType_e::LOOKUP );
if ( bForce )
dSIInfo[i].m_eForce = SecondaryIndexType_e::LOOKUP;
}
}
static void MarkAvailableSI ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
if ( !tCtx.m_pHistograms )
return;
ARRAY_FOREACH ( i, tCtx.m_dFilters )
{
if ( !dSIInfo[i].m_bUsable )
continue;
bool bForce = false;
if ( !HaveSI ( tCtx.m_dFilters[i], tCtx, bForce ) )
continue;
if ( bForce )
dSIInfo[i].m_eForce = SecondaryIndexType_e::INDEX;
dSIInfo[i].m_dCapabilities.Add ( SecondaryIndexType_e::INDEX );
}
}
static void MarkAvailableAnalyzers ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
ARRAY_FOREACH ( i, tCtx.m_dFilters )
{
if ( !dSIInfo[i].m_bUsable )
continue;
bool bForce = false;
if ( !HaveAnalyzer ( tCtx.m_dFilters[i], tCtx, bForce ) )
continue;
if ( bForce )
dSIInfo[i].m_eForce = SecondaryIndexType_e::ANALYZER;
dSIInfo[i].m_dCapabilities.Add ( SecondaryIndexType_e::ANALYZER );
// this belongs in the CBO, but for now let's just remove the option to evaluate FILTER if ANALYZER is present
// as ANALYZERs are always faster than FILTERs
dSIInfo[i].m_dCapabilities.RemoveValue ( SecondaryIndexType_e::FILTER );
if ( dSIInfo[i].m_eType==SecondaryIndexType_e::FILTER )
dSIInfo[i].m_eType = SecondaryIndexType_e::ANALYZER;
}
}
static void MarkAvailableOptional ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
ARRAY_FOREACH ( i, tCtx.m_dFilters )
if ( tCtx.m_dFilters[i].m_bOptional )
dSIInfo[i].m_dCapabilities.Add ( SecondaryIndexType_e::NONE );
}
static void RemoveOptionalColumnar ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
// if we have a columnar attribute and its capabilities are limited to just "filter" and "none",
// it means that columnar scan and secondary indexes were disabled via index hints;
// such filters need to be removed
ARRAY_FOREACH ( i, tCtx.m_dFilters )
{
auto & tSIInfo = dSIInfo[i];
auto & tFilter = tCtx.m_dFilters[i];
if ( tSIInfo.m_dCapabilities.GetLength()==2 && tSIInfo.m_dCapabilities[0]==SecondaryIndexType_e::FILTER && tSIInfo.m_dCapabilities[1]==SecondaryIndexType_e::NONE && tCtx.IsEnabled_Analyzer(tFilter) )
{
tSIInfo.m_dCapabilities.RemoveFast(0);
tSIInfo.m_eType = SecondaryIndexType_e::NONE;
}
}
}
static void ForceSI ( CSphVector<SecondaryIndexInfo_t> & dSIInfo )
{
for ( auto & i : dSIInfo )
if ( i.m_eForce!=SecondaryIndexType_e::NONE )
{
i.m_dCapabilities.Resize(0);
i.m_dCapabilities.Add ( i.m_eForce );
i.m_eType = i.m_eForce;
}
}
static void DisableRowidFilters ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
ARRAY_FOREACH ( i, dSIInfo )
if ( tCtx.m_dFilters[i].m_sAttrName=="@rowid" )
dSIInfo[i].m_dCapabilities.Resize(0);
}
static void FetchPartialColumnarMinMax ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
ARRAY_FOREACH ( i, dSIInfo )
{
auto & tSIInfo = dSIInfo[i];
auto & tFilter = tCtx.m_dFilters[i];
bool bHaveAnalyzers = tSIInfo.m_dCapabilities.any_of ( []( auto eCapability ){ return eCapability==SecondaryIndexType_e::ANALYZER; } );
bool bHaveSI = tSIInfo.m_dCapabilities.any_of ( []( auto eCapability ){ return eCapability==SecondaryIndexType_e::INDEX; } );
bool bHaveLookups = tSIInfo.m_dCapabilities.any_of ( []( auto eCapability ){ return eCapability==SecondaryIndexType_e::LOOKUP; } );
if ( bHaveAnalyzers && ( bHaveSI || bHaveLookups ) )
{
// create a single filter and run it through partial columnar minmax
VecTraits_T<CSphFilterSettings> dFilter = { &tFilter, 1 };
CreateFilterContext_t tCFCtx;
tCFCtx.m_pFilters = &dFilter;
tCFCtx.m_pMatchSchema = &tCtx.m_tSorterSchema;
tCFCtx.m_pIndexSchema = &tCtx.m_tIndexSchema;
tCFCtx.m_pColumnar = tCtx.m_pColumnar;
tCFCtx.m_eCollation = tCtx.m_tQuery.m_eCollation;
tCFCtx.m_bScan = true;
tCFCtx.m_pHistograms= tCtx.m_pHistograms;
tCFCtx.m_iTotalDocs = tCtx.m_iTotalDocs;
CSphString sError, sWarning;
if ( !sphCreateFilters ( tCFCtx, sError, sWarning ) )
continue;
common::Filter_t tColumnarFilter;
if ( !ToColumnarFilter ( tColumnarFilter, tFilter, tCtx.m_tQuery.m_eCollation, tCtx.m_tIndexSchema, sWarning ) )
continue;
tSIInfo.m_iPartialColumnarMinMax = tCtx.m_pColumnar->EstimateMinMax ( tColumnarFilter, *tCFCtx.m_pFilter );
}
}
}
static uint32_t CalcNumSIIterators ( const CSphFilterSettings & tFilter, int64_t iDocs, const SelectIteratorCtx_t & tCtx )
{
uint32_t uNumIterators = 1;
if ( tCtx.m_tSI.IsEmpty() )
return uNumIterators;
common::Filter_t tColumnarFilter;
CSphString sWarning;
if ( !ToColumnarFilter ( tColumnarFilter, tFilter, tCtx.m_tQuery.m_eCollation, tCtx.m_tIndexSchema, sWarning ) )
return 0;
return tCtx.m_tSI.GetNumIterators(tColumnarFilter);
}
static void FetchNumSIIterators ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx )
{
ARRAY_FOREACH ( i, dSIInfo )
{
auto & tSIInfo = dSIInfo[i];
tSIInfo.m_uNumSIIterators = CalcNumSIIterators ( tCtx.m_dFilters[i], tSIInfo.m_iRsetEstimate, tCtx );
}
}
static void CheckHint ( const IndexHint_t & tHint, const CSphFilterSettings & tFilter, const SecondaryIndexInfo_t & tSIInfo, const SelectIteratorCtx_t & tCtx, StrVec_t & dWarnings )
{
CSphString sWarning;
const auto * pAttr = tCtx.m_tIndexSchema.GetAttr ( tHint.m_sIndex.cstr() );
if ( !pAttr && !sphJsonNameSplit ( tHint.m_sIndex.cstr() ) )
{
sWarning.SetSprintf ( "hint error: '%s' attribute not found", tHint.m_sIndex.cstr() );
dWarnings.Add (sWarning);
return;
}
if ( !tSIInfo.m_bHasHistograms && !tSIInfo.m_bUsable )
{
sWarning.SetSprintf ( "hint error: histogram not found for attribute '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add (sWarning);
}
else if ( !tSIInfo.m_bUsable )
{
sWarning.SetSprintf ( "hint error: histogram unusable for attribute '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add (sWarning);
}
switch ( tHint.m_eType )
{
case SecondaryIndexType_e::LOOKUP:
if ( tHint.m_sIndex!=sphGetDocidName() )
dWarnings.Add ( "hint error: DocidIndex can only be applied to 'id' attribute" );
break;
case SecondaryIndexType_e::ANALYZER:
if ( tHint.m_bForce )
{
if ( !IsColumnarLibLoaded() )
dWarnings.Add ( "hint error: columnar library not loaded" );
else if ( !tCtx.m_pColumnar )
dWarnings.Add ( "hint error: no columnar storage" );
else if ( pAttr->m_eAttrType==SPH_ATTR_STRING && tCtx.m_tQuery.m_eCollation!=SPH_COLLATION_DEFAULT )
{
sWarning.SetSprintf ( "hint error: unsupported collation; ColumnarScan might be slow for '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
else
{
const auto * pAttr = tCtx.m_tIndexSchema.GetAttr ( tHint.m_sIndex.cstr() );
if ( !pAttr->IsColumnar() && !pAttr->IsColumnarExpr() )
{
sWarning.SetSprintf ( "hint error: attribute '%s' is not columnar", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
}
}
break;
case SecondaryIndexType_e::INDEX:
if ( tHint.m_bForce )
{
if ( !IsSecondaryLibLoaded() )
dWarnings.Add ( "hint error: secondary library not loaded" );
else if ( GetSecondaryIndexDefault()==SIDefault_e::DISABLED )
dWarnings.Add ( "hint error: secondary indexes are disabled" );
else if ( tCtx.m_tSI.IsEmpty() )
dWarnings.Add ( "hint error: table has no secondary indexes" );
else if ( !tCtx.m_tSI.IsEnabled ( tHint.m_sIndex ) )
{
sWarning.SetSprintf ( "hint error: secondary index disabled for '%s' (attribute was updated?)", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
else if ( pAttr->m_eAttrType==SPH_ATTR_STRING && tCtx.m_tQuery.m_eCollation!=SPH_COLLATION_DEFAULT )
{
sWarning.SetSprintf ( "hint error: unsupported collation; secondary index disabled for '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
else if ( pAttr->m_pExpr.Ptr() && !pAttr->IsColumnarExpr() )
{
sWarning.SetSprintf ( "hint error: attribute is an expression; secondary index disabled for '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
else if ( tFilter.m_eType!=SPH_FILTER_VALUES && tFilter.m_eType!=SPH_FILTER_STRING && tFilter.m_eType!=SPH_FILTER_STRING_LIST && tFilter.m_eType!=SPH_FILTER_RANGE && tFilter.m_eType!=SPH_FILTER_FLOATRANGE )
{
sWarning.SetSprintf ( "hint error: unsupported filter type; secondary index disabled for '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
else if ( tFilter.m_eMvaFunc==SPH_MVAFUNC_ALL )
{
sWarning.SetSprintf ( "hint error: unsupported mva eval type; secondary index disabled for '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
}
break;
default:
break;
}
}
static void CheckHints ( const CSphVector<SecondaryIndexInfo_t> & dSIInfo, const SelectIteratorCtx_t & tCtx, StrVec_t & dWarnings )
{
const auto & dFilters = tCtx.m_dFilters;
for ( auto & tHint : tCtx.m_tQuery.m_dIndexHints )
{
int iFilter = -1;
ARRAY_FOREACH ( i, dFilters )
if ( dFilters[i].m_sAttrName==tHint.m_sIndex )
{
iFilter = i;
break;
}
if ( iFilter==-1 )
{
CSphString sWarning;
sWarning.SetSprintf ( "hint error: filter not found for attribute '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
else
CheckHint ( tHint, dFilters[iFilter], dSIInfo[iFilter], tCtx, dWarnings );
}
ARRAY_FOREACH ( i, dFilters )
for ( auto & tHint : tCtx.m_tQuery.m_dIndexHints )
if ( tHint.m_sIndex==dFilters[i].m_sAttrName && tHint.m_bForce )
if ( !dSIInfo[i].m_dCapabilities.any_of ( [&tHint]( auto eSupported ){ return tHint.m_eType==eSupported; } ) )
{
CSphString sWarning;
sWarning.SetSprintf ( "hint error: requested hint type not supported for attribute '%s'", tHint.m_sIndex.cstr() );
dWarnings.Add(sWarning);
}
}
/////////////////////////////////////////////////////////////////////
CSphVector<SecondaryIndexInfo_t> SelectIterators ( const SelectIteratorCtx_t & tCtx, float & fBestCost, StrVec_t & dWarnings )
{
fBestCost = FLT_MAX;
CSphVector<SecondaryIndexInfo_t> dSIInfo ( tCtx.m_dFilters.GetLength() );
ARRAY_FOREACH ( i, dSIInfo )
dSIInfo[i].m_dCapabilities.Add ( SecondaryIndexType_e::FILTER );
if ( !tCtx.m_pHistograms )
{
if ( tCtx.m_tQuery.m_dIndexHints.GetLength() )
dWarnings.Add ( "index has no histograms; secondary indexes are unavailable" );
return dSIInfo;
}
// no iterators with OR queries
if ( !tCtx.m_tQuery.m_dFilterTree.IsEmpty() )
{
if ( tCtx.m_tQuery.m_dIndexHints.GetLength() )
dWarnings.Add ( "secondary indexes are not available when using the OR operator between filters" );
return dSIInfo;
}
FetchHistogramInfo ( dSIInfo, tCtx );
MarkAvailableLookup ( dSIInfo, tCtx );
MarkAvailableSI ( dSIInfo, tCtx );
MarkAvailableAnalyzers ( dSIInfo, tCtx );
MarkAvailableOptional ( dSIInfo, tCtx );
RemoveOptionalColumnar ( dSIInfo, tCtx );
ForceSI(dSIInfo);
DisableRowidFilters ( dSIInfo, tCtx );
FetchPartialColumnarMinMax ( dSIInfo, tCtx );
FetchNumSIIterators ( dSIInfo, tCtx );
CheckHints ( dSIInfo, tCtx, dWarnings );
CSphVector<int> dCapabilities ( dSIInfo.GetLength() );
CSphVector<int> dBest ( dSIInfo.GetLength() );
dCapabilities.ZeroVec();
dBest.ZeroVec();
const int MAX_TRIES = 1024;
for ( int iTry = 0; iTry < MAX_TRIES; iTry++ )
{
for ( int i = 0; i < dCapabilities.GetLength(); i++ )
dSIInfo[i].m_eType = dSIInfo[i].m_dCapabilities.GetLength() ? dSIInfo[i].m_dCapabilities[dCapabilities[i]] : SecondaryIndexType_e::NONE;
// don't use cutoff if we have more than one filter
int iCutoff = dSIInfo.GetLength() > 1 ? -1 : tCtx.m_iCutoff;
std::unique_ptr<CostEstimate_i> pCostEstimate ( CreateCostEstimate ( dSIInfo, tCtx, iCutoff ) );
float fCost = pCostEstimate->CalcQueryCost();
if ( fCost < fBestCost )
{
dBest = dCapabilities;
fBestCost = fCost;
}
if ( !NextSet ( dCapabilities, dSIInfo ) )
break;
}
for ( int i = 0; i < dBest.GetLength(); i++ )
dSIInfo[i].m_eType = dSIInfo[i].m_dCapabilities.GetLength() ? dSIInfo[i].m_dCapabilities[dBest[i]] : SecondaryIndexType_e::NONE;
return dSIInfo;
}
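// In effect SelectIterators() is a brute-force CBO: for every filter it picks one entry from that
// filter's capability list (FILTER/INDEX/ANALYZER/LOOKUP/NONE), costs the whole combination via
// CreateCostEstimate(), and keeps the cheapest assignment found. E.g. (hypothetical) two filters
// with capabilities {FILTER,INDEX} and {FILTER,ANALYZER} produce 4 costed combinations; once the
// product of list sizes exceeds MAX_TRIES=1024, the tail of the search space is simply not examined.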
const CSphFilterSettings * GetRowIdFilter ( const CSphVector<CSphFilterSettings> & dFilters, RowID_t uTotalDocs, RowIdBoundaries_t & tRowidBounds )
{
const CSphFilterSettings * pRowIdFilter = nullptr;
for ( const auto & tFilter : dFilters )
if ( tFilter.m_sAttrName=="@rowid" )
{
pRowIdFilter = &tFilter;
break;
}
if ( pRowIdFilter )
tRowidBounds = GetFilterRowIdBoundaries ( *pRowIdFilter, uTotalDocs );
return pRowIdFilter;
}
RowidIterator_i * CreateIteratorIntersect ( CSphVector<RowidIterator_i*> & dIterators, const RowIdBoundaries_t * pBoundaries )
{
if ( pBoundaries )
return new RowidIterator_Intersect_T<RowidIterator_i,true> ( dIterators.Begin(), dIterators.GetLength(), pBoundaries );
else
return new RowidIterator_Intersect_T<RowidIterator_i,false> ( dIterators.Begin(), dIterators.GetLength() );
}
RowidIterator_i * CreateIteratorWrapper ( common::BlockIterator_i * pIterator, const RowIdBoundaries_t * pBoundaries )
{
if ( pBoundaries )
return new RowidIterator_Wrapper_T<true> ( pIterator, pBoundaries );
else
return new RowidIterator_Wrapper_T<false> ( pIterator );
}
//////////////////////////////////////////////////////////////////////////
class RowidEmptyIterator_c : public RowidIterator_i
{
public:
RowidEmptyIterator_c ( const CSphString & sAttr ) : m_sAttr ( sAttr ){}
bool HintRowID ( RowID_t tRowID ) override { return false; }
bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock ) override { return false; }
int64_t GetNumProcessed() const override { return 0; }
void SetCutoff ( int iCutoff ) override {}
bool WasCutoffHit() const override { return false; }
void AddDesc ( CSphVector<IteratorDesc_t> & dDesc ) const override { dDesc.Add ( { m_sAttr, "SecondaryIndex" } ); }
private:
CSphString m_sAttr;
};
/////////////////////////////////////////////////////////////////////
class SIIteratorCreator_c
{
public:
SIIteratorCreator_c ( const SIContainer_c & tSI, CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, ESphCollation eCollation, const ISphSchema & tSchema, RowID_t uRowsCount, int iCutoff );
RowIteratorsWithEstimates_t Create();
private:
const SIContainer_c & m_tSI;
CSphVector<SecondaryIndexInfo_t> & m_dSIInfo;
const CSphVector<CSphFilterSettings> & m_dFilters;
ESphCollation m_eCollation;
const ISphSchema & m_tSchema;
RowID_t m_uRowsCount = 0;
int m_iCutoff = 0;
RowIdBoundaries_t m_tRowidBounds;
const CSphFilterSettings * m_pRowIdFilter = nullptr;
bool CreateSIIterators ( std::vector<common::BlockIterator_i *> & dFilterIt, const CSphFilterSettings & tFilter, int64_t iRsetSize );
RowidIterator_i * CreateRowIdIteratorFromSI ( std::vector<common::BlockIterator_i *> & dFilterIt, const CSphFilterSettings & tFilter );
};
SIIteratorCreator_c::SIIteratorCreator_c ( const SIContainer_c & tSI, CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, ESphCollation eCollation, const ISphSchema & tSchema, RowID_t uRowsCount, int iCutoff )
: m_tSI ( tSI )
, m_dSIInfo ( dSIInfo )
, m_dFilters ( dFilters )
, m_eCollation ( eCollation )
, m_tSchema ( tSchema )
, m_uRowsCount ( uRowsCount )
, m_iCutoff ( iCutoff )
, m_pRowIdFilter ( GetRowIdFilter ( dFilters, uRowsCount, m_tRowidBounds ) )
{}
bool SIIteratorCreator_c::CreateSIIterators ( std::vector<common::BlockIterator_i *> & dFilterIt, const CSphFilterSettings & tFilter, int64_t iRsetSize )
{
common::RowidRange_t tRange { m_tRowidBounds.m_tMinRowID, m_tRowidBounds.m_tMaxRowID };
if ( m_iCutoff>=0 )
iRsetSize = Min ( iRsetSize, m_iCutoff );
bool bCreated = false;
common::Filter_t tColumnarFilter;
CSphString sWarning;
CSphString sError;
if ( ToColumnarFilter ( tColumnarFilter, tFilter, m_eCollation, m_tSchema, sWarning ) )
bCreated = m_tSI.CreateIterators ( dFilterIt, tColumnarFilter, m_pRowIdFilter ? &tRange : nullptr, m_uRowsCount, iRsetSize, m_iCutoff, sError );
else
sphWarning ( "secondary index %s: %s", tFilter.m_sAttrName.cstr(), sWarning.cstr() );
if ( !bCreated )
{
// FIXME!!! return as query warning
sphWarning ( "%s", sError.cstr() );
for ( auto * pIt : dFilterIt ) { SafeDelete ( pIt ); }
dFilterIt.resize ( 0 );
return false;
}
if ( !sError.IsEmpty() )
{
// FIXME!!! return as query warning
sphWarning ( "secondary index %s:%s", tFilter.m_sAttrName.cstr(), sError.cstr() );
sError = "";
}
return true;
}
RowidIterator_i * SIIteratorCreator_c::CreateRowIdIteratorFromSI ( std::vector<common::BlockIterator_i *> & dFilterIt, const CSphFilterSettings & tFilter )
{
RowidIterator_i * pIt = nullptr;
if ( !dFilterIt.size() )
pIt = new RowidEmptyIterator_c ( tFilter.m_sAttrName );
else if ( dFilterIt.size()==1 )
{
if ( m_pRowIdFilter )
pIt = new RowidIterator_Wrapper_T<true> ( dFilterIt[0], &m_tRowidBounds );
else
pIt = new RowidIterator_Wrapper_T<false> ( dFilterIt[0] );
}
else
{
if ( m_pRowIdFilter )
pIt = new RowidIterator_Union_T<common::BlockIterator_i,true> ( &dFilterIt[0], (int)dFilterIt.size(), &m_tRowidBounds );
else
pIt = new RowidIterator_Union_T<common::BlockIterator_i,false> ( &dFilterIt[0], (int)dFilterIt.size() );
}
return pIt;
}
RowIteratorsWithEstimates_t SIIteratorCreator_c::Create()
{
RowIteratorsWithEstimates_t dRes;
ARRAY_FOREACH ( i, m_dSIInfo )
{
auto & tSIInfo = m_dSIInfo[i];
if ( tSIInfo.m_eType!=SecondaryIndexType_e::INDEX )
continue;
int64_t iRsetSize = tSIInfo.m_iRsetEstimate;
const CSphFilterSettings & tFilter = m_dFilters[i];
std::vector<common::BlockIterator_i *> dFilterIt;
if ( !CreateSIIterators ( dFilterIt, tFilter, iRsetSize ) )
continue;
RowidIterator_i * pIt = CreateRowIdIteratorFromSI ( dFilterIt, tFilter );
dRes.Add ( { pIt, iRsetSize } );
tSIInfo.m_bCreated = true;
}
return dRes;
}
/////////////////////////////////////////////////////////////////////
bool SIContainer_c::Load ( const CSphString & sFile, CSphString & sError )
{
SI::Index_i * pIndex = CreateSecondaryIndex ( sFile.cstr(), sError );
if ( !pIndex )
return false;
m_dIndexes.Add ( { std::unique_ptr<SI::Index_i>(pIndex), sFile } );
return true;
}
bool SIContainer_c::Drop ( const CSphString & sFile, CSphString & sError )
{
ARRAY_FOREACH ( i, m_dIndexes )
if ( m_dIndexes[i].m_sFile==sFile )
{
m_dIndexes.Remove(i);
return true;
}
sError.SetSprintf ( "secondary index file '%s' not loaded", sFile.cstr() );
return false;
}
void SIContainer_c::ColumnUpdated ( const CSphString & sAttr )
{
for ( auto & i : m_dIndexes )
if ( i.m_pIndex->IsEnabled ( sAttr.cstr() ) )
i.m_pIndex->ColumnUpdated ( sAttr.cstr() );
}
bool SIContainer_c::SaveMeta ( CSphString & sError ) const
{
std::string sTmpError;
for ( auto & i : m_dIndexes )
if ( !i.m_pIndex->SaveMeta ( sTmpError ) )
{
sError = sTmpError.c_str();
return false;
}
return true;
}
bool SIContainer_c::CreateIterators ( std::vector<common::BlockIterator_i *> & dIterators, const common::Filter_t & tFilter, const common::RowidRange_t * pBounds, uint32_t uMaxValues, int64_t iRsetSize, int iCutoff, CSphString & sError ) const
{
for ( auto & i : m_dIndexes )
if ( i.m_pIndex->IsEnabled ( tFilter.m_sName ) )
{
std::string sErrorSTL;
bool bOk = i.m_pIndex->CreateIterators ( dIterators, tFilter, pBounds, uMaxValues, iRsetSize, iCutoff, sErrorSTL );
sError = sErrorSTL.c_str();
return bOk;
}
return false;
}
int64_t SIContainer_c::GetCountDistinct ( const CSphString & sAttr ) const
{
std::string sAttrSTL = sAttr.cstr();
for ( auto & i : m_dIndexes )
if ( i.m_pIndex->IsEnabled(sAttrSTL) )
return i.m_pIndex->GetCountDistinct(sAttrSTL);
return -1;
}
bool SIContainer_c::CalcCount ( uint32_t & uCount, const common::Filter_t & tFilter, uint32_t uMaxValues, CSphString & sError ) const
{
for ( auto & i : m_dIndexes )
if ( i.m_pIndex->IsEnabled ( tFilter.m_sName ) )
{
std::string sErrorSTL;
bool bOk = i.m_pIndex->CalcCount ( uCount, tFilter, uMaxValues, sErrorSTL );
sError = sErrorSTL.c_str();
return bOk;
}
return false;
}
uint32_t SIContainer_c::GetNumIterators ( const common::Filter_t & tFilter ) const
{
for ( auto & i : m_dIndexes )
if ( i.m_pIndex->IsEnabled ( tFilter.m_sName ) )
return i.m_pIndex->GetNumIterators(tFilter);
return 0;
}
bool SIContainer_c::IsEnabled ( const CSphString & sAttr ) const
{
for ( auto & i : m_dIndexes )
if ( i.m_pIndex->IsEnabled ( sAttr.cstr() ) )
return true;
return false;
}
RowIteratorsWithEstimates_t SIContainer_c::CreateSecondaryIndexIterator ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, ESphCollation eCollation, const ISphSchema & tSchema, RowID_t uRowsCount, int iCutoff ) const
{
// don't use cutoff if we have more than one instance of SecondaryIndex/ColumnarScan
int iNumIterators = dSIInfo.count_of ( []( auto & tSI ){ return tSI.m_eType==SecondaryIndexType_e::INDEX || tSI.m_eType==SecondaryIndexType_e::ANALYZER; } );
if ( iNumIterators > 1 )
iCutoff = -1;
SIIteratorCreator_c tCreator ( *this, dSIInfo, dFilters, eCollation, tSchema, uRowsCount, iCutoff );
return tCreator.Create();
}
/////////////////////////////////////////////////////////////////////
static void ConvertSchema ( const CSphSchema & tSchema, common::Schema_t & tSISchema, CSphBitvec & tSIAttrs )
{
for ( int iAttr=0; iAttr<tSchema.GetAttrsCount(); iAttr++ )
{
const CSphColumnInfo & tCol = tSchema.GetAttr ( iAttr );
// skip special / internal attributes
if ( sphIsInternalAttr ( tCol.m_sName ) )
continue;
if ( tCol.m_eAttrType==SPH_ATTR_JSON )
continue;
if ( tCol.m_eAttrType==SPH_ATTR_FLOAT_VECTOR && tCol.IsIndexedKNN() )
continue;
common::StringHash_fn fnStringCalcHash = nullptr;
common::AttrType_e eAttrType = ToColumnarType ( tCol.m_eAttrType, tCol.m_tLocator.m_iBitCount );
// fixme! make default collation configurable
if ( eAttrType==common::AttrType_e::STRING )
fnStringCalcHash = LibcCIHash_fn::Hash;
tSISchema.push_back ( { tCol.m_sName.cstr(), eAttrType, fnStringCalcHash } );
tSIAttrs.BitSet(iAttr);
}
}
static void GetAttrsProxy ( const ISphSchema & tSchema, common::Schema_t & tSISchema, CSphVector<PlainOrColumnar_t> & dDstAttrs )
{
int iColumnar = 0;
for ( const auto & i : tSISchema )
{
const CSphColumnInfo * pAttr = tSchema.GetAttr ( i.m_sName.c_str() );
assert(pAttr);
dDstAttrs.Add ( PlainOrColumnar_t ( *pAttr, iColumnar ) );
if ( pAttr->IsColumnar() )
iColumnar++;
}
}
std::unique_ptr<SI::Builder_i> CreateIndexBuilder ( int64_t iMemoryLimit, const CSphSchema & tSchema, CSphBitvec & tSIAttrs, const CSphString & sFile, int iBufferSize, CSphString & sError )
{
common::Schema_t tSISchema;
ConvertSchema ( tSchema, tSISchema, tSIAttrs );
return CreateSecondaryIndexBuilder ( tSISchema, iMemoryLimit, sFile, iBufferSize, sError );
}
std::unique_ptr<SI::Builder_i> CreateIndexBuilder ( int64_t iMemoryLimit, const CSphSchema & tSchema, const CSphString & sFile, CSphVector<PlainOrColumnar_t> & dAttrs, int iBufferSize, CSphString & sError )
{
common::Schema_t tSISchema;
CSphBitvec tSIAttrs ( tSchema.GetAttrsCount() );
ConvertSchema ( tSchema, tSISchema, tSIAttrs );
GetAttrsProxy ( tSchema, tSISchema, dAttrs );
return CreateSecondaryIndexBuilder ( tSISchema, iMemoryLimit, sFile, iBufferSize, sError );
}
void BuildStoreSI ( RowID_t tRowID, const CSphRowitem * pRow, const BYTE * pPool, CSphVector<ScopedTypedIterator_t> & dIterators, const CSphVector<PlainOrColumnar_t> & dAttrs, SI::Builder_i * pBuilder, CSphVector<int64_t> & dTmp )
{
for ( int i = 0; i < dAttrs.GetLength(); i++ )
{
const PlainOrColumnar_t & tSrc = dAttrs[i];
const BYTE * pSrc = nullptr;
switch ( tSrc.m_eType )
{
case SPH_ATTR_UINT32SET:
{
int iValues = tSrc.Get ( tRowID, pRow, pPool, dIterators, pSrc ) / sizeof(DWORD);
// the builder needs a 64-bit array as input, so we widen our 32-bit values to 64-bit entries
dTmp.Resize ( iValues );
ARRAY_FOREACH ( i, dTmp )
dTmp[i] = ((DWORD*)pSrc)[i];
pBuilder->SetAttr ( i, dTmp.Begin(), iValues );
}
break;
case SPH_ATTR_INT64SET:
{
int iValues = tSrc.Get ( tRowID, pRow, pPool, dIterators, pSrc ) / sizeof(int64_t);
pBuilder->SetAttr ( i, (const int64_t*)pSrc, iValues );
}
break;
case SPH_ATTR_FLOAT_VECTOR:
{
int iValues = tSrc.Get ( tRowID, pRow, pPool, dIterators, pSrc ) / sizeof(float);
// the builder needs a 64-bit array as input, so we store each float's 32-bit IEEE-754 bit pattern (via sphF2DW) as a 64-bit entry
dTmp.Resize ( iValues );
ARRAY_FOREACH ( i, dTmp )
dTmp[i] = sphF2DW ( ((float*)pSrc)[i] );
pBuilder->SetAttr ( i, dTmp.Begin(), iValues );
}
break;
case SPH_ATTR_STRING:
{
int iBytes = tSrc.Get ( tRowID, pRow, pPool, dIterators, pSrc );
pBuilder->SetAttr ( i, (const uint8_t*)pSrc, iBytes );
}
break;
default:
pBuilder->SetAttr ( i, tSrc.Get ( tRowID, pRow, dIterators ) );
break;
}
}
}
| 44,397 | C++ | .cpp | 1,176 | 35.12415 | 260 | 0.708164 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,875 | testrt.cpp | manticoresoftware_manticoresearch/src/testrt.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxrt.h"
#include "accumulator.h"
#include "binlog.h"
#include "sphinxutils.h"
#include "sphinxsort.h"
#include "searchdaemon.h"
#include "indexing_sources/source_mysql.h"
#if HAVE_RTESTCONFIG_H
#include "rtestconfig.h"
#else
const char * rtestconfig = "error";
#endif
#if !defined (DATAFLD)
#define DATAFLD "data/"
#endif
#if _WIN32
#include "psapi.h"
#endif
int COMMIT_STEP = 1;
float g_fTotalMB = 0.0f;
void SetupIndexing ( CSphSource * pSrc )
{
CSphString sError;
if ( !pSrc->Connect ( sError ) )
sphDie ( "connect failed: %s", sError.cstr() );
if ( !pSrc->IterateStart ( sError ) )
sphDie ( "iterate-start failed: %s", sError.cstr() );
}
void DoSearch ( CSphIndex * pIndex )
{
printf ( "---\nsearching... " );
CSphQuery tQuery;
AggrResult_t tResult;
CSphQueryResult tQueryResult;
tQueryResult.m_pMeta = &tResult;
CSphMultiQueryArgs tArgs ( 1 );
tQuery.m_sQuery = "@title cat";
auto pParser = sphCreatePlainQueryParser();
tQuery.m_pQueryParser = pParser.get();
SphQueueSettings_t tQueueSettings ( pIndex->GetMatchSchema() );
CSphString sError;
SphQueueRes_t tRes;
ISphMatchSorter * pSorter = sphCreateQueue ( tQueueSettings, tQuery, sError, tRes );
if ( !pSorter )
{
printf ( "failed to create sorter; error=%s", tResult.m_sError.cstr() );
} else if ( !pIndex->MultiQuery ( tQueryResult, tQuery, { &pSorter, 1 }, tArgs ) )
{
printf ( "query failed; error=%s", pIndex->GetLastError().cstr() );
} else
{
auto & tOneRes = tResult.m_dResults.Add ();
tOneRes.FillFromSorter ( pSorter );
printf ( "%d results found in %d.%03d sec!\n", tOneRes.m_dMatches.GetLength(), tResult.m_iQueryTime/1000, tResult.m_iQueryTime%1000 );
ARRAY_FOREACH ( i, tOneRes.m_dMatches )
printf ( "%d. rowid=%u, weight=%d\n", 1+i, tOneRes.m_dMatches[i].m_tRowID, tOneRes.m_dMatches[i].m_iWeight );
}
SafeDelete ( pSorter );
printf ( "---\n" );
}
static int g_iFieldsCount = 0;
void DoIndexing ( CSphSource_SQL * pSrc, RtIndex_i * pIndex )
{
CSphString sError, sWarning, sFilter;
int64_t tmStart = sphMicroTimer ();
int64_t tmAvgCommit = 0;
int64_t tmMaxCommit = 0;
int iCommits = 0;
InsertDocData_c tDoc ( pIndex->GetMatchSchema() );
tDoc.m_dFields.Resize(g_iFieldsCount);
int iDynamic = pIndex->GetMatchSchema().GetRowSize();
RtAccum_t tAcc;
while (true)
{
bool bEOF = false;
const char ** pFields = (const char **)pSrc->NextDocument ( bEOF, sError );
if ( !pFields )
break;
ARRAY_FOREACH ( i, tDoc.m_dFields )
tDoc.m_dFields[i] = VecTraits_T<const char> ( pFields[i], strlen ( pFields[i] ) );
tDoc.m_tDoc.Combine ( pSrc->m_tDocInfo, iDynamic );
if ( !bEOF )
pIndex->AddDocument ( tDoc, false, sFilter, sError, sWarning, &tAcc );
auto& const_stat = pSrc->GetStats ();
++const_cast<CSphSourceStats&>(const_stat).m_iTotalDocuments;
if ( ( pSrc->GetStats().m_iTotalDocuments % COMMIT_STEP )==0 || bEOF )
{
int64_t tmCommit = sphMicroTimer();
pIndex->Commit ( NULL, &tAcc );
tmCommit = sphMicroTimer()-tmCommit;
iCommits++;
tmAvgCommit += tmCommit;
tmMaxCommit = Max ( tmMaxCommit, tmCommit );
if ( bEOF )
{
tmAvgCommit /= iCommits;
break;
}
}
if (!( pSrc->GetStats().m_iTotalDocuments % 100 ))
printf ( "%d docs\r", (int)pSrc->GetStats().m_iTotalDocuments );
static bool bOnce = true;
if ( iCommits*COMMIT_STEP>=5000 && bOnce )
{
printf ( "\n" );
DoSearch ( pIndex );
bOnce = false;
}
}
pSrc->Disconnect();
int64_t tmEnd = sphMicroTimer ();
float fTotalMB = (float)pSrc->GetStats().m_iTotalBytes/1000000.0f;
printf ( "commit-step %d, %d docs, %d bytes, %d.%03d sec, %.2f MB/sec\n",
COMMIT_STEP,
(int)pSrc->GetStats().m_iTotalDocuments,
(int)pSrc->GetStats().m_iTotalBytes,
(int)((tmEnd-tmStart)/1000000), (int)(((tmEnd-tmStart)%1000000)/1000),
fTotalMB*1000000.0f/(tmEnd-tmStart) );
printf ( "commit-docs %d, avg %d.%03d msec, max %d.%03d msec\n", COMMIT_STEP,
(int)(tmAvgCommit/1000), (int)(tmAvgCommit%1000),
(int)(tmMaxCommit/1000), (int)(tmMaxCommit%1000) );
g_fTotalMB += fTotalMB;
}
// copy-pasted chunk from indexer.cpp
// FIXME! it would be good to isolate that code and reuse it instead of copy-pasting
static bool g_bPrintQueries = false;
static int g_iMaxFileFieldBuffer = 8 * 1024 * 1024;
static ESphOnFileFieldError g_eOnFileFieldError = FFE_IGNORE_FIELD;
/////////////////////////////////////////////////////////////////////////////
/// parse multi-valued attr definition
bool ParseMultiAttr ( const char * sBuf, CSphColumnInfo &tAttr, const char * sSourceName )
{
// format is as follows:
//
// multi-valued-attr := ATTR-TYPE ATTR-NAME 'from' SOURCE-TYPE [;QUERY] [;RANGE-QUERY]
// ATTR-TYPE := 'uint' | 'timestamp' | 'bigint'
// SOURCE-TYPE := 'field' | 'query' | 'ranged-query' | 'ranged-main-query'
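// example definitions this parser accepts (illustrative values only):
//   uint tag from field
//   uint tag from query; SELECT id, tag FROM tags
//   bigint tag from ranged-query; SELECT id, tag FROM tags WHERE id>=$start AND id<=$end; SELECT MIN(id), MAX(id) FROM tags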
const char * sTok = NULL;
int iTokLen = -1;
#define LOC_ERR( _arg, _pos ) \
{ \
if ( !*(_pos) ) \
fprintf ( stdout, "ERROR: source '%s': unexpected end of line in sql_attr_multi.\n", sSourceName ); \
else \
fprintf ( stdout, "ERROR: source '%s': expected " _arg " in sql_attr_multi, got '%s'.\n", sSourceName, _pos ); \
return false; \
}
#define LOC_SPACE0() { while ( isspace(*sBuf) ) sBuf++; }
#define LOC_SPACE1() { if ( !isspace(*sBuf) ) LOC_ERR ( "token", sBuf ) ; LOC_SPACE0(); }
#define LOC_TOK() { sTok = sBuf; while ( sphIsAlpha(*sBuf) ) sBuf++; iTokLen = sBuf-sTok; }
#define LOC_TOKEQ( _arg ) ( iTokLen==(int)strlen(_arg) && strncasecmp ( sTok, _arg, iTokLen )==0 )
#define LOC_TEXT() { if ( *sBuf!=';') LOC_ERR ( "';'", sBuf ); sTok = ++sBuf; while ( *sBuf && *sBuf!=';' ) sBuf++; iTokLen = sBuf-sTok; }
// handle ATTR-TYPE
LOC_SPACE0();
LOC_TOK();
if ( LOC_TOKEQ( "uint" ) )
tAttr.m_eAttrType = SPH_ATTR_UINT32SET;
else if ( LOC_TOKEQ( "timestamp" ) )
tAttr.m_eAttrType = SPH_ATTR_UINT32SET;
else if ( LOC_TOKEQ( "bigint" ) )
tAttr.m_eAttrType = SPH_ATTR_INT64SET;
else LOC_ERR ( "attr type ('uint' or 'timestamp' or 'bigint')", sTok );
// handle ATTR-NAME
LOC_SPACE1();
LOC_TOK ();
if ( iTokLen )
tAttr.m_sName.SetBinary ( sTok, iTokLen );
else LOC_ERR ( "attr name", sTok );
// handle 'from'
LOC_SPACE1();
LOC_TOK();
if ( !LOC_TOKEQ( "from" ) ) LOC_ERR ( "'from' keyword", sTok );
// handle SOURCE-TYPE
LOC_SPACE1();
LOC_TOK();
LOC_SPACE0();
if ( LOC_TOKEQ( "field" ) )
tAttr.m_eSrc = SPH_ATTRSRC_FIELD;
else if ( LOC_TOKEQ( "query" ) )
tAttr.m_eSrc = SPH_ATTRSRC_QUERY;
else if ( LOC_TOKEQ( "ranged-query" ) )
tAttr.m_eSrc = SPH_ATTRSRC_RANGEDQUERY;
else if ( LOC_TOKEQ( "ranged-main-query" ) )
tAttr.m_eSrc = SPH_ATTRSRC_RANGEDMAINQUERY;
else LOC_ERR ( "value source type ('field', or 'query', or 'ranged-query', or 'ranged-main-query')", sTok );
if ( tAttr.m_eSrc==SPH_ATTRSRC_FIELD )
return true;
// handle QUERY
LOC_TEXT();
if ( iTokLen )
tAttr.m_sQuery.SetBinary ( sTok, iTokLen );
else LOC_ERR ( "query", sTok );
if ( tAttr.m_eSrc==SPH_ATTRSRC_QUERY || tAttr.m_eSrc==SPH_ATTRSRC_RANGEDMAINQUERY )
return true;
// handle RANGE-QUERY
LOC_TEXT();
if ( iTokLen )
tAttr.m_sQueryRange.SetBinary ( sTok, iTokLen );
else LOC_ERR ( "range query", sTok );
#undef LOC_ERR
#undef LOC_SPACE0
#undef LOC_SPACE1
#undef LOC_TOK
#undef LOC_TOKEQ
#undef LOC_TEXT
return true;
}
#define LOC_CHECK( _hash, _key, _msg, _add ) \
if (!( _hash.Exists ( _key ) )) \
{ \
fprintf ( stdout, "ERROR: key '%s' not found " _msg "\n", _key, _add ); \
return false; \
}
// get string
#define LOC_GETS( _arg, _key ) \
if ( hSource.Exists(_key) ) \
_arg = hSource[_key].strval();
// get int
#define LOC_GETI( _arg, _key ) \
if ( hSource.Exists(_key) && hSource[_key].intval() ) \
_arg = hSource[_key].intval();
// get int64_t
#define LOC_GETL( _arg, _key ) \
if ( hSource.Exists(_key) ) \
_arg = hSource[_key].int64val();
// get bool
#define LOC_GETB( _arg, _key ) \
if ( hSource.Exists(_key) ) \
_arg = ( hSource[_key].intval()!=0 );
// get array of strings
#define LOC_GETA( _arg, _key ) \
for ( CSphVariant * pVal = hSource(_key); pVal; pVal = pVal->m_pNext ) \
_arg.Add ( pVal->cstr() );
void SqlAttrsConfigure ( CSphSourceParams_SQL &tParams, const CSphVariant * pHead, ESphAttr eAttrType
, const char * sSourceName, bool bIndexedAttr = false )
{
for ( const CSphVariant * pCur = pHead; pCur; pCur = pCur->m_pNext )
{
CSphColumnInfo tCol ( pCur->cstr (), eAttrType );
char * pColon = strchr ( const_cast<char *> ( tCol.m_sName.cstr () ), ':' );
if ( pColon )
{
*pColon = '\0';
if ( eAttrType==SPH_ATTR_INTEGER )
{
int iBits = strtol ( pColon + 1, NULL, 10 );
if ( iBits<=0 || iBits>ROWITEM_BITS )
{
fprintf ( stdout, "WARNING: source '%s': attribute '%s': invalid bitcount=%d (bitcount ignored)\n"
, sSourceName, tCol.m_sName.cstr (), iBits );
iBits = -1;
}
tCol.m_tLocator.m_iBitCount = iBits;
} else
{
fprintf ( stdout, "WARNING: source '%s': attribute '%s': bitcount is only supported for integer types\n"
, sSourceName, tCol.m_sName.cstr () );
}
}
tParams.m_dAttrs.Add ( tCol );
if ( bIndexedAttr )
tParams.m_dAttrs.Last ().m_bIndexed = true;
}
}
#if WITH_ZLIB
bool ConfigureUnpack ( CSphVariant * pHead, ESphUnpackFormat eFormat, CSphSourceParams_SQL &tParams, const char * )
{
for ( CSphVariant * pVal = pHead; pVal; pVal = pVal->m_pNext )
{
CSphUnpackInfo &tUnpack = tParams.m_dUnpack.Add ();
tUnpack.m_sName = CSphString ( pVal->cstr () );
tUnpack.m_eFormat = eFormat;
}
return true;
}
#else
bool ConfigureUnpack ( CSphVariant * pHead, ESphUnpackFormat, CSphSourceParams_SQL &, const char * sSourceName )
{
if ( pHead )
{
fprintf ( stdout, "ERROR: source '%s': unpack is not supported, rebuild with zlib\n", sSourceName );
return false;
}
return true;
}
#endif // WITH_ZLIB
bool ParseJoinedField ( const char * sBuf, CSphJoinedField * pField, const char * sSourceName )
{
// sanity checks
assert ( pField );
if ( !sBuf || !sBuf[0] )
{
fprintf ( stdout, "ERROR: source '%s': sql_joined_field must not be empty.\n", sSourceName );
return false;
}
#define LOC_ERR( _exp ) \
{ \
fprintf ( stdout, "ERROR: source '%s': expected " _exp " in sql_joined_field, got '%s'.\n", sSourceName, sBuf ); \
return false; \
}
#define LOC_TEXT() { if ( *sBuf!=';') LOC_ERR ( "';'" ); sTmp = ++sBuf; while ( *sBuf && *sBuf!=';' ) sBuf++; iTokLen = sBuf-sTmp; }
// parse field name
while ( isspace ( *sBuf ) )
sBuf++;
const char * sName = sBuf;
while ( sphIsAlpha ( *sBuf ) )
sBuf++;
if ( sBuf==sName ) LOC_ERR ( "field name" );
pField->m_sName.SetBinary ( sName, sBuf - sName );
if ( !isspace ( *sBuf ) ) LOC_ERR ( "space" );
while ( isspace ( *sBuf ) )
sBuf++;
// parse 'from'
if ( strncasecmp ( sBuf, "from", 4 ) ) LOC_ERR ( "'from'" );
sBuf += 4;
if ( !isspace ( *sBuf ) ) LOC_ERR ( "space" );
while ( isspace ( *sBuf ) )
sBuf++;
bool bGotRanged = false;
pField->m_bPayload = false;
pField->m_bRangedMain = false;
// parse 'query'
if ( strncasecmp ( sBuf, "payload-query", 13 )==0 )
{
pField->m_bPayload = true;
sBuf += 13;
} else if ( strncasecmp ( sBuf, "query", 5 )==0 )
{
sBuf += 5;
} else if ( strncasecmp ( sBuf, "ranged-query", 12 )==0 )
{
bGotRanged = true;
sBuf += 12;
} else if ( strncasecmp ( sBuf, "ranged-main-query", 17 )==0 )
{
pField->m_bRangedMain = true;
sBuf += 17;
} else LOC_ERR ( "'query'" );
// parse ';'
while ( isspace ( *sBuf ) && *sBuf!=';' )
sBuf++;
if ( *sBuf!=';' ) LOC_ERR ( "';'" );
// handle QUERY
const char * sTmp = sBuf;
int iTokLen = 0;
LOC_TEXT();
if ( iTokLen )
pField->m_sQuery.SetBinary ( sTmp, iTokLen );
else LOC_ERR ( "query" );
if ( !bGotRanged )
return true;
// handle RANGE-QUERY
LOC_TEXT();
if ( iTokLen )
pField->m_sRanged.SetBinary ( sTmp, iTokLen );
else LOC_ERR ( "range query" );
#undef LOC_ERR
#undef LOC_TEXT
return true;
}
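// Example inputs this parser accepts (illustrative values only, in the usual sphinx.conf style):
//   tags from query; SELECT docid, CONCAT('tag', tagid) FROM tags ORDER BY docid ASC
//   comments from ranged-query; SELECT docid, body FROM comments WHERE docid>=$start AND docid<=$end; SELECT MIN(docid), MAX(docid) FROM comments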
bool SqlParamsConfigure ( CSphSourceParams_SQL &tParams, const CSphConfigSection &hSource, const char * sSourceName )
{
// in case of an ODBC source, the host, user, pass and db are not mandatory, since they may already be defined in the DSN string
if ( !hSource.Exists ( "odbc_dsn" ) )
{
LOC_CHECK ( hSource, "sql_host", "in source '%s'", sSourceName );
LOC_CHECK ( hSource, "sql_user", "in source '%s'", sSourceName );
LOC_CHECK ( hSource, "sql_pass", "in source '%s'", sSourceName );
LOC_CHECK ( hSource, "sql_db", "in source '%s'", sSourceName );
}
LOC_CHECK ( hSource, "sql_query", "in source '%s'", sSourceName );
LOC_GETS ( tParams.m_sHost, "sql_host" );
LOC_GETS ( tParams.m_sUser, "sql_user" );
LOC_GETS ( tParams.m_sPass, "sql_pass" );
LOC_GETS ( tParams.m_sDB, "sql_db" );
LOC_GETI ( tParams.m_uPort, "sql_port" );
LOC_GETS ( tParams.m_sQuery, "sql_query" );
LOC_GETA ( tParams.m_dQueryPre, "sql_query_pre" );
LOC_GETA ( tParams.m_dQueryPreAll, "sql_query_pre_all" );
LOC_GETA ( tParams.m_dQueryPost, "sql_query_post" );
LOC_GETS ( tParams.m_sQueryRange, "sql_query_range" );
LOC_GETA ( tParams.m_dQueryPostIndex, "sql_query_post_index" );
LOC_GETL ( tParams.m_iRangeStep, "sql_range_step" );
LOC_GETS ( tParams.m_sQueryKilllist, "sql_query_killlist" );
LOC_GETS ( tParams.m_sHookConnect, "hook_connect" );
LOC_GETS ( tParams.m_sHookQueryRange, "hook_query_range" );
LOC_GETS ( tParams.m_sHookPostIndex, "hook_post_index" );
LOC_GETI ( tParams.m_iRangedThrottleMs, "sql_ranged_throttle" );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_uint" ), SPH_ATTR_INTEGER, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_timestamp" ), SPH_ATTR_TIMESTAMP, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_bool" ), SPH_ATTR_BOOL, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_float" ), SPH_ATTR_FLOAT, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_bigint" ), SPH_ATTR_BIGINT, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_string" ), SPH_ATTR_STRING, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_attr_json" ), SPH_ATTR_JSON, sSourceName );
SqlAttrsConfigure ( tParams, hSource ( "sql_field_string" ), SPH_ATTR_STRING, sSourceName, true );
LOC_GETA ( tParams.m_dFileFields, "sql_file_field" );
tParams.m_iMaxFileBufferSize = g_iMaxFileFieldBuffer;
tParams.m_iRefRangeStep = tParams.m_iRangeStep;
tParams.m_eOnFileFieldError = g_eOnFileFieldError;
// unpack
if ( !ConfigureUnpack ( hSource ( "unpack_zlib" ), SPH_UNPACK_ZLIB, tParams, sSourceName ) )
return false;
if ( !ConfigureUnpack ( hSource ( "unpack_mysqlcompress" ), SPH_UNPACK_MYSQL_COMPRESS, tParams, sSourceName ) )
return false;
tParams.m_uUnpackMemoryLimit = hSource.GetSize ( "unpack_mysqlcompress_maxsize", 16777216 );
// parse multi-attrs
for ( CSphVariant * pVal = hSource ( "sql_attr_multi" ); pVal; pVal = pVal->m_pNext )
{
CSphColumnInfo tAttr;
if ( !ParseMultiAttr ( pVal->cstr (), tAttr, sSourceName ) )
return false;
tParams.m_dAttrs.Add ( tAttr );
}
// parse joined fields
for ( CSphVariant * pVal = hSource ( "sql_joined_field" ); pVal; pVal = pVal->m_pNext )
if ( !ParseJoinedField ( pVal->cstr (), &tParams.m_dJoinedFields.Add (), sSourceName ) )
return false;
// make sure attr names are unique
ARRAY_FOREACH ( i, tParams.m_dAttrs )
for ( int j = i + 1; j<tParams.m_dAttrs.GetLength (); j++ )
{
const CSphString &sName = tParams.m_dAttrs[i].m_sName;
if ( sName==tParams.m_dAttrs[j].m_sName )
{
fprintf ( stdout, "ERROR: duplicate attribute name: %s\n", sName.cstr () );
return false;
}
}
// additional checks
if ( tParams.m_iRangedThrottleMs<0 )
{
fprintf ( stdout, "WARNING: sql_ranged_throttle must not be negative; throttling disabled\n" );
tParams.m_iRangedThrottleMs = 0;
}
// debug printer
if ( g_bPrintQueries )
tParams.m_bPrintQueries = true;
return true;
}
CSphSource_SQL * SpawnSource ( const char * sSourceName, const CSphConfigType &hSources, TokenizerRefPtr_c pTok, DictRefPtr_c pDict )
{
const CSphConfigSection &hSource = hSources[sSourceName];
assert ( hSource["type"]=="mysql" );
CSphSourceParams_MySQL tParams;
if ( !SqlParamsConfigure ( tParams, hSource, sSourceName ) )
return nullptr;
LOC_GETS ( tParams.m_sUsock, "sql_sock" );
LOC_GETI ( tParams.m_iFlags, "mysql_connect_flags" );
LOC_GETS ( tParams.m_sSslKey, "mysql_ssl_key" );
LOC_GETS ( tParams.m_sSslCert, "mysql_ssl_cert" );
LOC_GETS ( tParams.m_sSslCA, "mysql_ssl_ca" );
auto * pSrc = CreateSourceMysql ( tParams, sSourceName ); // pass the actual source name, not the literal "sSourceName"
if (!pSrc)
sphDie ( "setup failed" );
pSrc->SetTokenizer ( std::move ( pTok ) );
pSrc->SetDict ( pDict );
SetupIndexing ( pSrc );
return (CSphSource_SQL*) pSrc;
}
static RtIndex_i * g_pIndex = NULL;
void IndexingThread ( void * pArg )
{
auto * pSrc = (CSphSource_SQL *) pArg;
DoIndexing ( pSrc, g_pIndex );
}
int main ( int argc, char ** argv )
{
if ( argc==2 )
COMMIT_STEP = atoi ( argv[1] );
// threads should be initialized before memory allocations
char cTopOfMainStack;
Threads::Init();
Threads::PrepareMainThread ( &cTopOfMainStack );
CSphConfig hConf;
ParseConfig ( &hConf, "internal", FROMS(rtestconfig) );
const CSphConfigType &hSources = hConf["source"];
CSphString sError;
CSphDictSettings tDictSettings;
tDictSettings.m_bWordDict = false;
TokenizerRefPtr_c pTok = Tokenizer::Detail::CreateUTF8Tokenizer();
DictRefPtr_c pDict {sphCreateDictionaryCRC ( tDictSettings, NULL, pTok, "rt1", false, 32, nullptr, sError )};
auto * pSrc = SpawnSource ( "test1", hSources, pTok, pDict );
TokenizerRefPtr_c pTok2 { Tokenizer::Detail::CreateUTF8Tokenizer() };
DictRefPtr_c pDict2 {sphCreateDictionaryCRC ( tDictSettings, NULL, pTok, "rt2", false, 32, nullptr, sError )};
auto * pSrc2 = SpawnSource ( "test2", hSources, pTok2, pDict2 );
CSphSchema tSrcSchema;
if ( !pSrc->UpdateSchema ( &tSrcSchema, sError ) )
sphDie ( "update-schema failed: %s", sError.cstr() );
CSphSchema tSchema; // source schema must be all dynamic attrs; but index ones must be static
for ( int i=0; i<tSrcSchema.GetFieldsCount(); i++ )
tSchema.AddField ( tSrcSchema.GetField(i) );
for ( int i=0; i<tSrcSchema.GetAttrsCount(); i++ )
tSchema.AddAttr ( tSrcSchema.GetAttr(i), false );
g_iFieldsCount = tSrcSchema.GetFieldsCount();
CSphConfigSection tRTConfig;
sphRTInit ( "" );
Binlog::Configure ( tRTConfig, 0 );
SmallStringHash_T< CSphIndex * > dTemp;
Binlog::Replay ( dTemp );
auto pIndex = sphCreateIndexRT ( "testrt", DATAFLD "dump", tSchema, 32*1024*1024, false ).release();
pIndex->SetTokenizer ( pTok ); // index will own this pair from now on
pIndex->SetDictionary ( pDict );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
sphDie ( "prealloc failed: %s", pIndex->GetLastError().cstr() );
pIndex->PostSetup();
g_pIndex = pIndex;
// initial indexing
int64_t tmStart = sphMicroTimer();
SphThread_t t1, t2;
Threads::Create ( &t1, [pSrc] { IndexingThread ( pSrc ); } );
Threads::Create ( &t2, [pSrc2] { IndexingThread ( pSrc2 ); } );
Threads::Join ( &t1 );
Threads::Join ( &t2 );
#if 0
// update
tParams.m_sQuery = "SELECT id, channel_id, UNIX_TIMESTAMP(published) published, title, "
"UNCOMPRESS(content) content FROM rt2 WHERE id<=10000";
SetupIndexing ( pSrc, tParams );
DoIndexing ( pSrc, pIndex );
#endif
// search
DoSearch ( pIndex );
// shutdown index (should cause dump)
int64_t tmShutdown = sphMicroTimer();
#if SPH_ALLOCS_PROFILER
printf ( "pre-shutdown allocs=%d, bytes=" INT64_FMT "\n", sphAllocsCount(), sphAllocBytes() );
#endif
SafeDelete ( pIndex );
#if SPH_ALLOCS_PROFILER
printf ( "post-shutdown allocs=%d, bytes=" INT64_FMT "\n", sphAllocsCount(), sphAllocBytes() );
#endif
int64_t tmEnd = sphMicroTimer();
printf ( "shutdown done in %d.%03d sec\n", (int)((tmEnd-tmShutdown)/1000000), (int)(((tmEnd-tmShutdown)%1000000)/1000) );
printf ( "total with shutdown %d.%03d sec, %.2f MB/sec\n",
(int)((tmEnd-tmStart)/1000000), (int)(((tmEnd-tmStart)%1000000)/1000),
g_fTotalMB*1000000.0f/(tmEnd-tmStart) );
#if SPH_DEBUG_LEAKS || SPH_ALLOCS_PROFILER
sphAllocsStats();
#endif
#if _WIN32
PROCESS_MEMORY_COUNTERS pmc;
HANDLE hProcess = OpenProcess ( PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, GetCurrentProcessId() );
if ( hProcess && GetProcessMemoryInfo ( hProcess, &pmc, sizeof(pmc)) )
{
printf ( "--- peak-wss=%d, peak-pagefile=%d\n", (int)pmc.PeakWorkingSetSize, (int)pmc.PeakPagefileUsage );
}
#endif
SafeDelete ( pIndex );
Binlog::Deinit ();
}
// BLOODY DIRTY HACK!!!
// definitions of AggrResult_t members just copy-pasted 'as is' from searchdaemon.cpp
int AggrResult_t::GetLength () const
{
int iCount = 0;
m_dResults.Apply ( [&iCount] ( const OneResultset_t & a ) { iCount += a.m_dMatches.GetLength (); } );
return iCount;
}
void AggrResult_t::ClampMatches ( int iLimit )
{
assert ( m_bSingle );
if ( !m_dResults.IsEmpty () )
m_dResults.First ().ClampMatches ( iLimit );
}
void AggrResult_t::ClampAllMatches ()
{
for ( auto& dResult : m_dResults )
dResult.ClampAllMatches();
}
int OneResultset_t::FillFromSorter ( ISphMatchSorter * pQueue )
{
if ( !pQueue )
return 0;
assert ( m_dMatches.IsEmpty () );
m_tSchema = *pQueue->GetSchema ();
if ( !pQueue->GetLength () )
return 0;
int iCopied = pQueue->Flatten ( m_dMatches.AddN ( pQueue->GetLength () ) );
m_dMatches.Resize ( iCopied );
return iCopied;
}
void OneResultset_t::ClampAllMatches ()
{
for ( auto& dMatch : m_dMatches )
m_tSchema.FreeDataPtrs ( dMatch );
m_dMatches.Reset();
}
void OneResultset_t::ClampMatches ( int iLimit )
{
assert ( iLimit>0 );
int iMatches = m_dMatches.GetLength ();
for ( int i = iLimit; i<iMatches; ++i )
m_tSchema.FreeDataPtrs ( m_dMatches[i] );
m_dMatches.Resize ( iLimit );
}
OneResultset_t::~OneResultset_t()
{
for ( auto & dMatch : m_dMatches )
m_tSchema.FreeDataPtrs ( dMatch );
}
| 22,590 | C++ | .cpp | 614 | 34.184039 | 149 | 0.673232 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 16,876 | hazard_pointer.cpp | manticoresoftware_manticoresearch/src/hazard_pointer.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "hazard_pointer.h"
#include "threadutils.h"
static const DWORD MIN_POINTERS = 100;
static const int MULTIPLIER = 2;
// if one hazard object owns another, and so on - how deeply they can be nested.
// on shutdown we stop unwinding after this many steps and report an error.
static const int NESTED_LEVELS = 16;
using namespace hazard;
namespace {
inline int GetCleanupSize ()
{
DWORD uSize = Threads::GetNumOfRunning () * POINTERS_PER_THREAD * MULTIPLIER;
return (int) Max ( uSize, MIN_POINTERS );
}
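// e.g. (hypothetical values) with 32 running threads and POINTERS_PER_THREAD=4 this yields
// 32*4*2=256 retired slots, while with only a few threads the MIN_POINTERS=100 floor applies.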
}
struct ListedPointer_t : public AtomicPointer_t
{
ListedPointer_t* m_pNext;
};
using VecListedPointers_t = CSphFixedVector<ListedPointer_t>;
// raw pointer and specific deleter which knows how to deal with the pointer
//using RetiredPointer_t = std::pair <void*,Deleter_fn>; <- pair is NOT trivially copyable... need manual struct
struct RetiredPointer_t
{
void* m_pPtr;
hazard::Deleter_fn m_pDeleter;
};
// for tracking that alloc/dealloc of hazard is in one thread
static ThreadRole thHazardThread;
using fnHazardProcessor=std::function<void ( Pointer_t )>;
// to be write-used in single thread, so no need to sync anything
// that is the lowest level which actually stores current pointers.
struct Storage_t : public VecListedPointers_t
{
ListedPointer_t* m_pHead GUARDED_BY ( thHazardThread );
explicit Storage_t ( int iSize )
: VecListedPointers_t ( iSize )
{
// link as list
for (auto i=0; i<m_iCount; ++i )
{
m_pData[i].m_pNext = m_pData+i+1;
m_pData[i].m_pData.store ( nullptr, std::memory_order_relaxed );
}
Last().m_pNext = nullptr;
m_pHead = m_pData;
}
bool IsFull() const REQUIRES ( thHazardThread )
{
return !m_pHead;
}
ListedPointer_t* Alloc () ACQUIRE ( thHazardThread ) NO_THREAD_SAFETY_ANALYSIS
{
AcquireRole ( thHazardThread );
assert (!IsFull());
auto pElem = m_pHead;
m_pHead = pElem->m_pNext;
return pElem;
}
void Dealloc ( ListedPointer_t* pElem ) noexcept RELEASE ( thHazardThread ) NO_THREAD_SAFETY_ANALYSIS
{
if (!pElem)
return;
pElem->m_pData.store ( nullptr, std::memory_order_release );
pElem->m_pNext = m_pHead;
m_pHead = pElem;
ReleaseRole ( thHazardThread );
}
};
// to ensure whole retiring operation done in one thread
static ThreadRole thRetiringThread;
using VecRetiredPointers_t = CSphVector<RetiredPointer_t>;
// that is single-threaded in use, so no lock-free loops necessary.
struct VecOfRetired_t : public VecRetiredPointers_t, public ISphNoncopyable
{
int m_iCurrent GUARDED_BY ( thRetiringThread ) = 0;
inline bool IsFull () const noexcept REQUIRES ( thRetiringThread )
{
return m_iCurrent>=GetLength ();
}
inline bool NotEmpty () const noexcept REQUIRES ( thRetiringThread )
{
return m_iCurrent>0;
}
// store a retired element; returns false if the storage became full as a result
bool Retire ( RetiredPointer_t && tData ) noexcept REQUIRES ( thRetiringThread )
{
At ( m_iCurrent++ ) = tData;
return m_iCurrent<GetLength ();
}
void Swap ( VecOfRetired_t& rhs) REQUIRES ( thRetiringThread )
{
// this long line is for atomic 'swap ( m_iCurrent, rhs.m_iCurrent )'
::Swap ( m_iCurrent, rhs.m_iCurrent );
SwapData ( rhs );
}
};
// when a thread's hazard state destructs, it may still hold some alive retired pointers;
// we leak them into this sink and occasionally prune it from other threads.
class RetiredSink_c
{
CSphMutex m_dGuard;
VecRetiredPointers_t m_dSink GUARDED_BY (m_dGuard);
void PruneSink () REQUIRES ( m_dGuard );
void MaybePruneSink () REQUIRES ( m_dGuard );
public:
void AdoptRetired ( VecOfRetired_t& dRetired ) EXCLUDES ( m_dGuard );
void FinallyPruneSink () EXCLUDES ( m_dGuard );
};
// main hazard pointer storage class (per-thread)
class ThreadState_c : public ISphNoncopyable
{
Storage_t m_tHazards;
VecOfRetired_t m_tRetired GUARDED_BY ( thRetiringThread );
static RetiredSink_c m_dGlobalSink;
// generic main GC procedure
void Scan();
// the part where only trimming (no resize adopt, no resize at the end).
void PruneRetired ();
public:
ThreadState_c ();
~ThreadState_c ();
// Main entry point to retire (delete) a pointer. bNow forces an immediate removal attempt
void Retire ( RetiredPointer_t tPtr, bool bNow );
// called once globally when daemon finishes - to finally delete everything
void Shutdown();
AtomicPointer_t* HazardAlloc();
void HazardDealloc ( AtomicPointer_t * pPointer );
void IterateHazards ( fnHazardProcessor&& fnProcessor ) const;
static void FinallyPruneSink();
};
namespace { // unnamed (static)
// delete an object stored in RetiredPointer
void PrunePointer ( RetiredPointer_t* pPtr )
{
assert (pPtr);
assert (pPtr->m_pDeleter);
pPtr->m_pDeleter ( pPtr->m_pPtr );
}
// main GC procedure. Implements hazard pointers cleaning
/*
* Sources:
- [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
- [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
- [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
- Inspired by libcds by Maxim Khizhinsky (libcds.dev@gmail.com) - http://github.com/khizmax/libcds/
GC is performed in four steps:
1. Walk over all currently running threads and collect their hazard pointers.
2. Sort and uniq the collection.
3. Walk over the current list of retired pointers: a pointer not referenced by any hazard is finally pruned.
4. Put the surviving (still referenced) pointers back into the list of retired.
Since the size of the list is by design greater than N of threads * N of hazards per thread, every pass is
guaranteed to prune at least some pointers.
*/
CSphVector<Pointer_t> CollectActiveHazardPointers()
{
// prepare vec for collect active hazard pointers
CSphVector <Pointer_t> dActive;
dActive.Reserve ( Threads::GetNumOfRunning () * POINTERS_PER_THREAD );
// stage 1. Walk over ALL currently running threads and collect their hazard pointers
Threads::IterateActive ([&dActive] ( Threads::LowThreadDesc_t * pDesc )
{
if (!pDesc)
return;
auto pOtherThreadState = (ThreadState_c *) pDesc->m_pHazards.load ( std::memory_order_relaxed );
if ( !pOtherThreadState )
return;
pOtherThreadState->IterateHazards ( [&dActive] ( Pointer_t pPtr ) { dActive.Add ( pPtr ); } );
});
// stage 2. sort and uniq; we will use binsearch then
dActive.Uniq();
return dActive;
}
// Perform real work. If pruning object, in turn, includes hazard-guarded data inside,
// it will be retired into retiring list of current thread.
// Alive will be moved to the beginning of provided array, and function returns their quantity.
int PruneRetiredImpl ( RetiredPointer_t * pData, size_t iSize )
{
if ( !iSize )
return 0;
auto dActive = CollectActiveHazardPointers();
// stage 3-4. Keep in m_tRetired only active elems.
// Note that m_tRetired contains only unique elems, since it works as 'delete'
// (a non-unique entry would immediately mean a double-free bug in the application).
auto * pDst = pData;
auto * pEnd = pData + iSize;
for ( auto * pSrc = pData; pSrc<pEnd; ++pSrc )
{
if ( !dActive.BinarySearch ( pSrc->m_pPtr ) )
PrunePointer (pSrc); // stage 3 - delete non-alive
else { // stage 4 - copy alive, back in the list
if (pDst!=pSrc)
*pDst=*pSrc;
++pDst;
}
}
return (int) (pDst - pData);
}
// shortcut for special cases, like retiring a huge blob of memory:
// we try to prune it immediately and return whether that succeeded
bool TryPruneOnePointer ( RetiredPointer_t* pSrc )
{
auto dActive = CollectActiveHazardPointers ();
if ( dActive.BinarySearch ( pSrc->m_pPtr ) )
return false;
PrunePointer ( pSrc );
return true;
}
} // unnamed namespace
// when a thread's hazard state destructs, it may still hold some alive retired pointers;
// we leak them into this sink and occasionally prune it from other threads.
void RetiredSink_c::PruneSink ()
{
auto iCompressed = PruneRetiredImpl ( m_dSink.begin (), m_dSink.GetLength () );
m_dSink.Resize ( iCompressed );
}
void RetiredSink_c::MaybePruneSink ()
{
if ( m_dSink.GetLength()>=GetCleanupSize () )
PruneSink();
}
// adopt a retired chunk from a finishing thread.
// Called from the thread d-tr, to sink pointers that outlived the thread.
// Called from Shutdown, to sink the last pointers in the process.
void RetiredSink_c::AdoptRetired ( VecOfRetired_t& dRetired )
{
ScopedMutex_t _ ( m_dGuard );
ScopedRole_c r ( thRetiringThread );
m_dSink.Append ( dRetired.Slice ( 0, dRetired.m_iCurrent ) );
dRetired.m_iCurrent = 0;
MaybePruneSink ();
}
RetiredSink_c ThreadState_c::m_dGlobalSink;
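// per-thread state, created lazily on first use; the ctor publishes it into
// LowThreadDesc_t::m_pHazards so that other threads can scan our hazards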
static ThreadState_c & ThreadState ()
{
static thread_local ThreadState_c tState;
return tState;
}
ThreadState_c::ThreadState_c ()
: m_tHazards { POINTERS_PER_THREAD }
{
m_tRetired.Resize ( GetCleanupSize () );
Threads::MyThd ().m_pHazards.store ( this, std::memory_order_relaxed );
}
ThreadState_c::~ThreadState_c ()
{
while ( m_tRetired.NotEmpty() )
m_dGlobalSink.AdoptRetired ( m_tRetired );
}
// GC entry point - called when there is no more room for retired pointers
void ThreadState_c::Scan () REQUIRES ( thRetiringThread )
{
// Before starting, check whether the number of threads has grown, in which case we just need more space for retired pointers
if ( m_tRetired.GetLength() >= GetCleanupSize() )
PruneRetired ();
else
m_tRetired.Resize ( GetCleanupSize () );
}
// main GC worker
void ThreadState_c::PruneRetired () REQUIRES ( thRetiringThread )
{
// The task is actually quite simple: we call ::PruneRetiredImpl, which keeps all alive pointers, prunes
// all dead ones, and returns the new size of our vec, now containing only alive pointers.
// The problem is that deleting the dead ones may produce more to retire (if a deleted object itself has something to retire).
// Since we entered PruneRetired because we're full, we can't retire any more pointers right now.
// So we use this ping-pong with a tmp vector to give room for nested retiring.
VecOfRetired_t tCurrentRetired;
tCurrentRetired.Resize ( GetCleanupSize () );
m_tRetired.Swap ( tCurrentRetired );
auto iRest = ::PruneRetiredImpl ( tCurrentRetired.begin (), tCurrentRetired.m_iCurrent );
m_tRetired.Swap ( tCurrentRetired );
m_tRetired.m_iCurrent = iRest;
tCurrentRetired.Resize( tCurrentRetired.m_iCurrent );
for ( auto dTailed : tCurrentRetired ) // some nested prune happened
Retire ( dTailed, false );
}
void ThreadState_c::Retire ( RetiredPointer_t tPtr, bool bNow )
{
if ( bNow && TryPruneOnePointer ( &tPtr ) )
return;
ScopedRole_c _ ( thRetiringThread );
if ( m_tRetired.Retire( std::move (tPtr) ) )
return;
Scan();
if ( m_tRetired.IsFull() )
Scan();
assert (!m_tRetired.IsFull());
}
AtomicPointer_t * ThreadState_c::HazardAlloc () ACQUIRE ( thHazardThread )
{
return m_tHazards.Alloc();
}
void ThreadState_c::HazardDealloc ( AtomicPointer_t * pPointer ) RELEASE ( thHazardThread )
{
m_tHazards.Dealloc ( (ListedPointer_t *) pPointer );
}
void ThreadState_c::IterateHazards ( fnHazardProcessor&& fnProcessor ) const
{
for ( const auto & tHazard : m_tHazards )
{
// here 'acquire' semantics complement the 'release' semantics used by the thread that saves the hazard ptr.
auto pData = tHazard.m_pData.load ( std::memory_order_acquire );
if ( pData )
fnProcessor ( pData );
}
}
void RetiredSink_c::FinallyPruneSink ()
{
ScopedMutex_t _ ( m_dGuard );
PruneSink ();
// re-retire the remaining alive pointers via the current thread
for ( auto& dTailed : m_dSink )
ThreadState ().Retire ( dTailed, false );
m_dSink.Reset();
}
void ThreadState_c::FinallyPruneSink()
{
m_dGlobalSink.FinallyPruneSink();
}
void ThreadState_c::Shutdown ()
{
ScopedRole_c _ ( thRetiringThread );
ThreadState_c::FinallyPruneSink ();
int iIteration=1;
while ( m_tRetired.NotEmpty () )
{
PruneRetired ();
sphLogDebug ( "ThreadState_c::Shutdown() iteration %d, has %d", iIteration, m_tRetired.m_iCurrent );
++iIteration;
if ( iIteration>NESTED_LEVELS ) // assume there are at most NESTED_LEVELS nested levels of retiring
{
auto iActive = CollectActiveHazardPointers ().GetLength();
auto iRunning = Threads::GetNumOfRunning ();
auto iPointers = m_tRetired.m_iCurrent;
sphWarning ( "Still %d threads, %d pointers, %d active pointers; assume deadlock, abort",
iRunning, iPointers, iActive );
return;
}
};
}
CSphVector<int> hazard::GetListOfPointed ( Accessor_fn&& fnAccess, int iCount )
{
CSphVector<int> dResult;
auto dActive = CollectActiveHazardPointers ();
for (int i=0; i<iCount;++i )
if ( dActive.BinarySearch ( fnAccess(i) ) )
dResult.Add(i);
return dResult;
}
hazard::Guard_c::Guard_c() ACQUIRE ( thHazardThread ) NO_THREAD_SAFETY_ANALYSIS
{
m_pGuard = ThreadState ().HazardAlloc ();
assert ( m_pGuard && "couldn't alloc hazard pointer. Not enough slots?" );
}
hazard::Guard_c::~Guard_c() RELEASE ( thHazardThread ) NO_THREAD_SAFETY_ANALYSIS
{
if ( m_pGuard )
ThreadState ().HazardDealloc ( (ListedPointer_t *) m_pGuard );
}
// main entry point for deletion; filters out nullptr values on the fly.
void hazard::Retire ( Pointer_t pData, Deleter_fn fnDelete, bool bNow )
{
if ( pData )
ThreadState ().Retire ( { pData, fnDelete }, bNow );
}
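// Usage sketch (illustrative only, not part of the original source). A reader pins
// an object via Guard_c while dereferencing it; a writer that unlinks the object
// hands it to hazard::Retire() with a matching deleter. The deleter signature is
// an assumption based on how PrunePointer() invokes m_pDeleter ( m_pPtr ):
//
//   hazard::Guard_c tGuard;              // grabs one of the per-thread hazard slots
//   ... // reader: publish the pointer being read into the guard's slot, then use it
//
//   struct Payload { int m_iValue; };    // hypothetical guarded type
//   hazard::Retire ( pOldPayload,
//       [] ( void* p ) { delete (Payload*)p; }, // assumes Deleter_fn accepts void*
//       false );                         // false = defer deletion to the next GC scan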
void hazard::Shutdown ()
{
ThreadState ().Shutdown();
sphLogDebug ( "hazard::Shutdown() done" );
}
| 13,501
|
C++
|
.cpp
| 379
| 33.522427
| 120
| 0.739034
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,877
|
sphinxql_extra.cpp
|
manticoresoftware_manticoresearch/src/sphinxql_extra.cpp
|
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "sphinxql_extra.h"
#include "sphinxql_debug.h"
struct BlobLocator_t
{
int m_iStart;
int m_iLen;
};
class SqlExtraParser_c : public SqlParserTraits_c
{
public:
SqlExtraParser_c ( CSphVector<SqlStmt_t>& dStmt, const char* szQuery, CSphString* pError)
: SqlParserTraits_c ( dStmt, szQuery, pError )
{
if ( m_dStmt.IsEmpty() )
PushQuery();
else
m_pStmt = &m_dStmt.Last();
m_sErrorHeader = "P05:";
}
CSphString StrFromBlob ( BlobLocator_t tStr ) const
{
CSphString sResult;
sResult.SetBinary(m_pBuf+tStr.m_iStart, tStr.m_iLen);
return sResult;
}
void SetStatement ()
{
m_pStmt->m_eStmt = STMT_SET;
m_pStmt->m_eSet = SET_EXTRA;
}
inline void SetGlobalScope()
{
m_pStmt->m_iIntParam = 0;
}
inline void SetSessionScope()
{
m_pStmt->m_iIntParam = 1;
}
void AddIntval ( CSphVector<SqlInsert_t>& dVec, const SqlNode_t& tNode )
{
SqlInsert_t& tIns = dVec.Add();
tIns.CopyValueInt ( tNode );
tIns.m_iType = m_pStmt->m_iIntParam; // 0 or 1 - global or session int
}
void AddStrval ( CSphVector<SqlInsert_t>& dVec, const SqlNode_t& tNode )
{
SqlInsert_t& tIns = dVec.Add();
ToString ( tIns.m_sVal, tNode ).Unquote();
tIns.m_iType = 2 + m_pStmt->m_iIntParam; // 2 or 3 - global or session string
}
void AddSetName ( StrVec_t& dVec, const SqlNode_t& tNode )
{
auto& sIns = dVec.Add();
ToString ( sIns, tNode ).Unquote();
}
};
#define YYSTYPE SqlNode_t
// unused parameter, simply to avoid type clash between all my yylex() functions
#define YY_DECL inline int flex_extraparser ( YYSTYPE* lvalp, void* yyscanner, SqlExtraParser_c* pParser )
#include "flexsphinxql_extra.c"
static void yyerror ( SqlParserTraits_c* pParser, const char* szMessage )
{
// flex puts a zero at the last token boundary; make it undo that
yy6lex_unhold ( pParser->m_pScanner );
pParser->ProcessParsingError ( szMessage );
}
#ifndef NDEBUG
// use a proxy so that it is possible to debug inside yylex
inline int yylex ( YYSTYPE * lvalp, SqlExtraParser_c * pParser )
{
int res = flex_extraparser ( lvalp, pParser->m_pScanner, pParser );
return res;
}
#else
inline int yylex ( YYSTYPE * lvalp, SqlExtraParser_c * pParser )
{
return flex_extraparser ( lvalp, pParser->m_pScanner, pParser );
}
#endif
#include "bissphinxql_extra.c"
ParseResult_e ParseExtra ( Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError )
{
assert ( IsFilled ( sQuery ) );
SqlExtraParser_c tParser ( dStmt, sQuery.first, &sError );
tParser.m_pBuf = sQuery.first;
char * sEnd = const_cast<char *>( sQuery.first+sQuery.second );
sEnd[0] = 0; // prepare for yy_scan_buffer
sEnd[1] = 0; // this is ok because string allocates a small gap
yy6lex_init ( &tParser.m_pScanner );
YY_BUFFER_STATE tLexerBuffer = yy6_scan_buffer ( const_cast<char *>( sQuery.first ), sQuery.second+2, tParser.m_pScanner );
if ( !tLexerBuffer )
{
sError = "internal error: yy6_scan_buffer() failed";
return ParseResult_e::PARSE_ERROR;
}
int iRes = yyparse ( &tParser );
yy6_delete_buffer ( tLexerBuffer, tParser.m_pScanner );
yy6lex_destroy ( tParser.m_pScanner );
// special case - processing single comment directive
if ( sQuery.second > 1
&& sQuery.first[0] == '/'
&& sQuery.first[1] == '*'
&& (sError == "P05: syntax error, unexpected $end near '(null)'" // usual old bison
|| sError == "P05: syntax error, unexpected end of file near '(null)'") ) // new bison from homebrew
{
tParser.DefaultOk();
iRes = 0;
}
return ( iRes || dStmt.IsEmpty() ) ? ParseResult_e::PARSE_ERROR : ParseResult_e::PARSE_OK;
}
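// Usage sketch (illustrative only, not part of the original source). ParseExtra
// needs a mutable buffer with two spare bytes past the end, because the lexer
// writes two terminating zeroes there; CSphString allocates such a gap:
//
//   CSphVector<SqlStmt_t> dStmt;
//   CSphString sError;
//   CSphString sQuery ( "SET GLOBAL log_level=1" );   // hypothetical statement
//   Str_t tQuery { sQuery.cstr(), sQuery.Length() };
//   if ( ParseExtra ( tQuery, dStmt, sError )==ParseResult_e::PARSE_OK )
//       { /* dStmt holds one STMT_SET statement with SET_EXTRA semantics */ }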
| 4,031
|
C++
|
.cpp
| 121
| 31.132231
| 124
| 0.71344
|
manticoresoftware/manticoresearch
| 8,893
| 493
| 500
|
GPL-3.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
16,878
|
searchdconfig.cpp
|
manticoresoftware_manticoresearch/src/searchdconfig.cpp
|
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdconfig.h"
#include "sphinxjson.h"
#include "searchdaemon.h"
#include "searchdha.h"
#include "searchdreplication.h"
#include "replication/configuration.h"
#include "fileutils.h"
#include "sphinxint.h"
#include "coroutine.h"
#include "sphinxpq.h"
#include "binlog.h"
#include "global_idf.h"
using namespace Threads;
static Coro::Mutex_c g_tSaveInProgress;
static Coro::RWLock_c g_tCfgIndexesLock;
// description of clusters and indexes loaded from internal config
static CSphVector<ClusterDesc_t> g_dCfgClusters;
static CSphVector<IndexDesc_t> g_dCfgIndexes GUARDED_BY ( g_tCfgIndexesLock );
static CSphString g_sLogFile;
static CSphString g_sDataDir;
static CSphString g_sConfigPath;
static bool g_bConfigless = false;
static CSphString GetPathForNewIndex ( const CSphString & sIndexName )
{
CSphString sRes;
if ( g_sDataDir.Length() && !g_sDataDir.Ends("/") && !g_sDataDir.Ends("\\") )
sRes.SetSprintf ( "%s/%s", g_sDataDir.cstr(), sIndexName.cstr() );
else
sRes.SetSprintf ( "%s%s", g_sDataDir.cstr(), sIndexName.cstr() );
return sRes;
}
CSphString GetDataDirInt()
{
return g_sDataDir;
}
CSphString GetDatadirPath ( const CSphString& sPath )
{
if ( sPath.IsEmpty() )
return GetDataDirInt();
return SphSprintf ( "%s/%s", GetDataDirInt().cstr(), sPath.cstr() );
}
bool IsConfigless()
{
return g_bConfigless;
}
const CSphVector<ClusterDesc_t> & GetClustersInt()
{
return g_dCfgClusters;
}
void ModifyDaemonPaths ( CSphConfigSection & hSearchd, FixPathAbsolute_fn && fnPathFix )
{
if ( !IsConfigless() )
return;
const char * szBinlogKey = "binlog_path";
if ( !hSearchd.Exists(szBinlogKey) )
{
CSphString sBinlogDir;
sBinlogDir.SetSprintf ( "%s/binlog", GetDataDirInt().cstr() );
if ( !sphDirExists ( sBinlogDir.cstr() ) )
{
if ( !MkDir ( sBinlogDir.cstr() ) )
{
sphWarning ( "Unable to create binlog dir '%s'", sBinlogDir.cstr() );
return;
}
}
hSearchd.AddEntry ( szBinlogKey, sBinlogDir.cstr() );
}
if ( fnPathFix && hSearchd.Exists ( szBinlogKey ) )
{
CSphString sBinlogPath ( hSearchd.GetStr( szBinlogKey ) );
fnPathFix ( sBinlogPath );
hSearchd.Delete ( szBinlogKey );
hSearchd.AddEntry ( szBinlogKey, sBinlogPath.cstr() );
}
}
// support for old-style absolute paths
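// e.g. a bare name 'products' becomes '<data_dir>/products/products' (index file prefix inside its own dir)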
static void MakeRelativePath ( CSphString & sPath )
{
bool bAbsolute = strchr ( sPath.cstr(), '/' ) || strchr ( sPath.cstr(), '\\' );
if ( !bAbsolute )
sPath.SetSprintf ( "%s/%s/%s", GetDataDirInt().cstr(), sPath.cstr(), sPath.cstr() );
}
//////////////////////////////////////////////////////////////////////////
class FilenameBuilder_c : public FilenameBuilder_i
{
public:
explicit FilenameBuilder_c ( CSphString sIndex );
CSphString GetFullPath ( const CSphString & sName ) const final;
private:
const CSphString m_sIndex;
};
FilenameBuilder_c::FilenameBuilder_c ( CSphString sIndex )
: m_sIndex ( std::move ( sIndex ) )
{}
CSphString FilenameBuilder_c::GetFullPath ( const CSphString & sName ) const
{
if ( !IsConfigless() || !sName.Length() )
return sName;
CSphString sPath = GetPathForNewIndex ( m_sIndex );
StringBuilder_c sNewValue {" "};
StringBuilder_c sTmp;
// we assume that path has been stripped before
StrVec_t dValues = sphSplit ( sName.cstr(), sName.Length(), " \t," );
for ( auto & i : dValues )
{
if ( !i.Length() )
continue;
sTmp.Clear();
sTmp.Appendf ( "%s/%s", sPath.cstr(), i.Trim().cstr() );
sNewValue << RealPath ( sTmp.cstr() );
}
return (CSphString)sNewValue;
}
//////////////////////////////////////////////////////////////////////////
// parse cluster options from a single string and set them into the hash
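// e.g. "pc.weight=2;gcache.size=128M" yields two entries in m_hOptions (option names here are illustrative)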
void ClusterOptions_t::Parse ( const CSphString & sOptions )
{
if ( !sOptions.IsEmpty() )
sph::ParseKeyValues ( sOptions.cstr(), [this] ( CSphString&& sIdent, const CSphString& sValue ) { m_hOptions.Add ( sValue, std::move(sIdent) ); }, ";" );
}
// get string of cluster options with semicolon delimiter
CSphString ClusterOptions_t::AsStr () const
{
StringBuilder_c tBuf ( ";" );
for ( const auto& tOpt : m_hOptions )
tBuf.Sprintf ( "%s=%s", tOpt.first.cstr(), tOpt.second.cstr() );
return (CSphString)tBuf;
}
bool ClusterOptions_t::IsEmpty() const noexcept
{
return m_hOptions.IsEmpty();
}
//////////////////////////////////////////////////////////////////////////
bool ClusterDesc_t::Parse ( const bson::Bson_c& tBson, const CSphString& sName, CSphString& sWarning )
{
using namespace bson;
if ( sName.IsEmpty() )
return TlsMsg::Err ( "empty cluster name" );
m_sName = sName;
m_dClusterNodes.Reset();
Bson_c tNodes { tBson.ChildByName ( "nodes" ) };
if ( tNodes.IsString() ) // old config - all nodes in one line, ','-separated
sphSplit ( m_dClusterNodes, String ( tNodes ).cstr(), "," );
else
tNodes.ForEach ( [this] ( const NodeHandle_t& tNode ) { m_dClusterNodes.Add ( String ( tNode ) ); } );
m_tOptions.m_hOptions.Reset();
Bson_c tOptions { tBson.ChildByName ( "options" ) };
if ( tOptions.IsString() ) // old config - all options in one line, need to be parsed
m_tOptions.Parse ( String ( tOptions ) );
else
tOptions.ForEach ( [this] ( CSphString&& sName, const NodeHandle_t& tNode ) { m_tOptions.m_hOptions.Add ( String ( tNode ), sName ); } );
int iItem = 0;
Bson_c ( tBson.ChildByName ( "indexes" ) ).ForEach ( [this,&iItem,&sWarning] ( const NodeHandle_t& tNode ) {
if ( IsString(tNode ) )
m_hIndexes.Add ( String ( tNode ));
else
sWarning.SetSprintf ( "table %d: name '%s' should be a string, skipped", iItem, m_sName.cstr() );
++iItem;
} );
m_sPath = String ( tBson.ChildByName ( "path" ) );
return true;
}
void operator<< ( JsonEscapedBuilder& tOut, const ClusterOptions_t& tOptions )
{
auto _ = tOut.ObjectW();
for ( const auto& tOpt : tOptions.m_hOptions )
{
if ( tOpt.first == "pc.bootstrap" ) // we always skip pc.bootstrap when storing values into json
continue;
tOut.NamedString ( tOpt.first.cstr(), tOpt.second );
}
}
void ClusterDesc_t::Save ( JsonEscapedBuilder& tOut ) const
{
tOut.Named ( m_sName.cstr() );
auto _0 = tOut.ObjectW();
if ( !m_dClusterNodes.IsEmpty() )
{
tOut.Named ( "nodes" );
auto _1 = tOut.Array();
for_each ( m_dClusterNodes, [&tOut] ( const auto& sNode ) { tOut.String ( sNode ); } );
}
if ( !m_tOptions.IsEmpty() )
tOut.NamedVal ( "options", m_tOptions );
if ( !m_hIndexes.IsEmpty() )
{
tOut.Named ( "indexes" );
auto _1 = tOut.Array();
for_each ( m_hIndexes, [&tOut] ( const auto& tIndex ) { tOut.String ( tIndex.first ); } );
}
tOut.NamedStringNonEmpty ( "path", m_sPath );
}
//////////////////////////////////////////////////////////////////////////
bool IndexDescDistr_t::Parse ( const bson::Bson_c& tBson, CSphString& sWarning )
{
using namespace bson;
Bson_c ( tBson.ChildByName ( "locals" ) ).ForEach ( [this,&sWarning] ( const NodeHandle_t& tNode ) {
if ( !IsString(tNode) )
{
sWarning = "lists of local tables must only contain strings, skipped";
return;
}
m_dLocals.Add ( String ( tNode ) );
} );
Bson_c ( tBson.ChildByName ( "agents" ) ).ForEach ( [this, &sWarning] ( const NodeHandle_t& tNode ) {
AgentConfigDesc_t& tNew = m_dAgents.Add();
auto tBson = bson::Bson_c { tNode };
tNew.m_sConfig = String ( tBson.ChildByName ( "config" ) );
tNew.m_bBlackhole = Bool ( tBson.ChildByName ( "blackhole" ) );
tNew.m_bPersistent = Bool ( tBson.ChildByName ( "persistent" ) );
} );
m_iAgentConnectTimeout = Int ( tBson.ChildByName( "agent_connect_timeout" ));
m_iAgentQueryTimeout = Int ( tBson.ChildByName ( "agent_query_timeout" ) );
m_iAgentRetryCount = Int ( tBson.ChildByName ( "agent_retry_count" ) );
m_bDivideRemoteRanges = Bool ( tBson.ChildByName ( "divide_remote_ranges" ) );
m_sHaStrategy = String ( tBson.ChildByName ( "ha_strategy" ), {} );
return true;
}
void operator<< ( JsonEscapedBuilder& tOut, const AgentConfigDesc_t& tAgent )
{
auto _ = tOut.ObjectW();
tOut.NamedString ( "config", tAgent.m_sConfig );
tOut.NamedVal ( "blackhole", tAgent.m_bBlackhole );
tOut.NamedVal ( "persistent", tAgent.m_bPersistent );
}
void IndexDescDistr_t::Save ( JsonEscapedBuilder& tOut ) const
{
auto _0 = tOut.ObjectW();
tOut.NamedString ( "type", GetIndexTypeName ( IndexType_e::DISTR ) );
if ( !m_dLocals.IsEmpty() )
{
tOut.Named ( "locals" );
auto _ = tOut.Array();
for_each ( m_dLocals, [&tOut] ( const auto& sNode ) { tOut.String ( sNode ); } );
}
if ( !m_dAgents.IsEmpty() )
{
tOut.Named ( "agents" );
auto _ = tOut.Array();
for_each ( m_dAgents, [&tOut] ( const auto& sNode ) { tOut << sNode; } );
}
tOut.NamedValNonDefault ( "agent_connect_timeout", m_iAgentConnectTimeout, 0 );
tOut.NamedValNonDefault ( "agent_query_timeout", m_iAgentQueryTimeout, 0 );
tOut.NamedValNonDefault ( "agent_retry_count", m_iAgentRetryCount, 0 );
tOut.NamedVal ( "divide_remote_ranges", m_bDivideRemoteRanges );
tOut.NamedStringNonDefault ( "ha_strategy", m_sHaStrategy, {} );
}
void IndexDescDistr_t::Save ( CSphConfigSection & hIndex ) const
{
for ( const auto & i : m_dLocals )
hIndex.AddEntry ( "local", i.cstr() );
for ( const auto & i : m_dAgents )
{
const char * szConf = i.m_sConfig.cstr();
if ( i.m_bBlackhole )
hIndex.AddEntry ( "agent_blackhole", szConf );
else if ( i.m_bPersistent )
hIndex.AddEntry ( "agent_persistent", szConf );
else
hIndex.AddEntry ( "agent", szConf );
}
CSphString sTmp;
if ( m_iAgentConnectTimeout>0 )
hIndex.AddEntry ( "agent_connect_timeout", sTmp.SetSprintf ( "%d", m_iAgentConnectTimeout ).cstr() );
if ( m_iAgentQueryTimeout>0 )
hIndex.AddEntry ( "agent_query_timeout", sTmp.SetSprintf ( "%d", m_iAgentQueryTimeout ).cstr() );
if ( m_iAgentRetryCount > 0 )
hIndex.AddEntry ( "agent_retry_count", sTmp.SetSprintf ( "%d", m_iAgentRetryCount ).cstr() );
hIndex.AddEntry ( "divide_remote_ranges", m_bDivideRemoteRanges ? "1" : "0" );
if ( !m_sHaStrategy.IsEmpty() )
hIndex.AddEntry ( "ha_strategy", m_sHaStrategy.cstr() );
}
//////////////////////////////////////////////////////////////////////////
bool IndexDesc_t::Parse ( const bson::Bson_c& tBson, const CSphString& sName, CSphString& sWarning )
{
using namespace bson;
if ( sName.IsEmpty() )
return TlsMsg::Err ( "empty table name" );
m_sName = sName;
CSphString sType = String ( tBson.ChildByName ( "type" ) );
m_eType = TypeOfIndexConfig ( sType );
if ( m_eType == IndexType_e::ERROR_ )
return TlsMsg::Err ( "type '%s' is invalid", sType.cstr() );
if ( m_eType != IndexType_e::DISTR )
{
m_sPath = String ( tBson.ChildByName ( "path" ) );
MakeRelativePath ( m_sPath );
return true;
}
bool bParseOk = m_tDistr.Parse ( tBson, sWarning );
if ( TlsMsg::HasErr() )
TlsMsg::Err ( "table %s: %s", m_sName.cstr(), TlsMsg::szError() );
if ( !sWarning.IsEmpty() )
sWarning.SetSprintf ( "table %s: %s", m_sName.cstr(), sWarning.cstr() );
return bParseOk;
}
void IndexDesc_t::Save ( JsonEscapedBuilder& tOut ) const
{
tOut.Named ( m_sName.cstr() );
if ( m_eType == IndexType_e::DISTR )
return m_tDistr.Save ( tOut );
auto _ = tOut.ObjectW();
tOut.NamedString ("type", GetIndexTypeName ( m_eType ) );
CSphString sPath = m_sPath;
tOut.NamedString ( "path", StripPath ( sPath ) );
}
void IndexDesc_t::Save ( CSphConfigSection & hIndex ) const
{
hIndex.Add ( CSphVariant ( GetIndexTypeName ( m_eType ) ), "type" );
if ( m_eType==IndexType_e::DISTR )
m_tDistr.Save (hIndex);
else
{
hIndex.Add ( CSphVariant ( m_sPath.cstr() ), "path" );
// dummy
hIndex.Add ( CSphVariant ( "text" ), "rt_field" );
hIndex.Add ( CSphVariant ( "gid" ), "rt_attr_uint" );
}
}
//////////////////////////////////////////////////////////////////////////
// read info about clusters and indexes from manticore.json and validate the data
static bool ConfigRead ( const CSphString& sConfigPath, CSphVector<ClusterDesc_t>& dClusters, CSphVector<IndexDesc_t>& dIndexes, CSphString& sError )
{
if ( !sphIsReadable ( sConfigPath, nullptr ) )
return true;
CSphAutoreader tConfigFile;
if ( !tConfigFile.Open ( sConfigPath, sError ) )
return false;
int iSize = (int)tConfigFile.GetFilesize();
if ( !iSize )
return true;
CSphFixedVector<char> dJsonText ( iSize + 2 );
auto iRead = (int64_t)sphReadThrottled ( tConfigFile.GetFD(), dJsonText.begin(), iSize );
if ( iRead != iSize )
return false;
if ( tConfigFile.GetErrorFlag() )
{
sError = tConfigFile.GetErrorMessage();
return false;
}
decltype ( dJsonText )::POLICY_T::Zero ( dJsonText.begin() + iSize, 2 ); // safe gap
CSphVector<BYTE> dBson;
if ( !sphJsonParse ( dBson, dJsonText.begin(), false, false, false, sError ) )
return false;
using namespace bson;
Bson_c tBson ( dBson );
if ( tBson.IsEmpty() || !tBson.IsAssoc() )
{
sError = "Something went wrong reading the json config - it is either empty or not a root object.";
return false;
}
LoadCompatHttp ( (const char*)dJsonText.Begin() );
Bson_c ( tBson.ChildByName ( "indexes" ) ).ForEach ( [&dIndexes] ( CSphString&& sName, const NodeHandle_t& tNode ) {
Bson_c tNodeBson { tNode };
IndexDesc_t tIndex;
CSphString sWarning;
if ( !tIndex.Parse ( tNodeBson, sName, sWarning ) )
{
sphWarning ( "table '%s'(%d) error: %s", sName.cstr(), dIndexes.GetLength(), TlsMsg::szError() );
return;
}
if ( !sWarning.IsEmpty() )
sphWarning ( "table '%s'(%d) warning: %s", sName.cstr(), dIndexes.GetLength(), sWarning.cstr() );
int iExists = dIndexes.GetFirst ( [&] ( const IndexDesc_t& tItem ) {
return ( tItem.m_sName == tIndex.m_sName || ( ( tItem.m_eType == IndexType_e::PLAIN || tItem.m_eType == IndexType_e::RT ) && tItem.m_sPath == tIndex.m_sPath ) );
} );
if ( iExists != -1 )
{
const IndexDesc_t& tItem = dIndexes[iExists];
sphWarning ( "table with the same %s already exists: %s, %s, SKIPPED", ( tItem.m_sName == tIndex.m_sName ? "name" : "path" ), tIndex.m_sName.scstr(), tIndex.m_sPath.scstr() );
return;
}
dIndexes.Add ( tIndex );
} );
// check clusters
Bson_c ( tBson.ChildByName ( "clusters" ) ).ForEach ( [&dClusters] ( CSphString&& sName, const NodeHandle_t& tNode ) {
Bson_c tNodeBson { tNode };
ClusterDesc_t tCluster;
CSphString sClusterWarning;
bool bParsed = tCluster.Parse ( tNodeBson, sName, sClusterWarning );
if ( !sClusterWarning.IsEmpty() )
sphWarning ( "cluster '%s': %s", tCluster.m_sName.cstr(), sClusterWarning.cstr() );
if ( !bParsed )
{
sphWarning ( "cluster '%s': disabled at JSON config, %s", tCluster.m_sName.cstr(), TlsMsg::szError() );
return;
}
int iExists = dClusters.GetFirst ( [&] ( const ClusterDesc_t& tItem ) { return ( tItem.m_sName == tCluster.m_sName || tItem.m_sPath == tCluster.m_sPath ); } );
if ( iExists != -1 )
{
const ClusterDesc_t& tItem = dClusters[iExists];
sphWarning ( "cluster with the same %s already exists: %s, %s, SKIPPED", ( tItem.m_sName == tCluster.m_sName ? "name" : "path" ), tCluster.m_sName.scstr(), tCluster.m_sPath.scstr() );
return;
}
dClusters.Add ( tCluster );
} );
sphLogDebug ( "config loaded, tables %d, clusters %d", dIndexes.GetLength(), dClusters.GetLength() );
return true;
}
static bool ConfigWrite ( const CSphString & sConfigPath, const CSphVector<ClusterDesc_t> & dClusters, const CSphVector<IndexDesc_t> & dIndexes, CSphString & sError )
{
JsonEscapedBuilder tOut;
tOut.ObjectWBlock();
tOut.Named ( "clusters" );
{
auto _ = tOut.Object();
for_each ( dClusters, [&tOut] ( const auto& tCluster ) { tCluster.Save ( tOut ); } );
}
tOut.Named ( "indexes" );
{
auto _ = tOut.Object();
for_each ( dIndexes, [&tOut] ( const auto& tIndex ) { tIndex.Save ( tOut ); } );
}
SaveCompatHttp ( tOut );
tOut.FinishBlocks();
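// atomic-replace dance: write everything into '<config>.new', rotate the current
// file to '<config>.old', move the new file into place, and only then drop '.old';
// the '.old' copy allows rolling back if the final rename fails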
CSphString sNew, sOld;
auto & sCur = sConfigPath;
sNew.SetSprintf ( "%s.new", sCur.cstr() );
sOld.SetSprintf ( "%s.old", sCur.cstr() );
CSphWriter tConfigFile;
if ( !tConfigFile.OpenFile ( sNew, sError ) )
return false;
tConfigFile.PutString ( (Str_t)tOut );
assert ( bson::ValidateJson ( tOut.cstr() ) );
tConfigFile.CloseFile();
if ( tConfigFile.IsError() )
return false;
if ( sphIsReadable ( sCur, nullptr ) && rename ( sCur.cstr(), sOld.cstr() ) )
{
sError.SetSprintf ( "failed to rename current to old, '%s'->'%s', error '%s'", sCur.cstr(), sOld.cstr(), strerror(errno) );
return false;
}
if ( rename ( sNew.cstr(), sCur.cstr() ) )
{
sError.SetSprintf ( "failed to rename new to current, '%s'->'%s', error '%s'", sNew.cstr(), sCur.cstr(), strerror(errno) );
if ( sphIsReadable ( sOld, nullptr ) && rename ( sOld.cstr(), sCur.cstr() ) )
sError.SetSprintf ( "%s, rollback failed too", sError.cstr() );
return false;
}
unlink ( sOld.cstr() );
sphLogDebug ( "config saved, tables %d, clusters %d", dIndexes.GetLength(), dClusters.GetLength() );
return true;
}
// ClientSession_c::Execute -> HandleMysqlImportTable -> AddExistingIndexConfigless -> ConfiglessPreloadIndex
// ServiceMain -> ConfigureAndPreload -> ConfigureAndPreloadConfiglessIndexes -> ConfiglessPreloadIndex
static ESphAddIndex ConfiglessPreloadIndex ( const IndexDesc_t & tIndex, StrVec_t & dWarnings, CSphString & sError )
{
assert ( IsConfigless() );
CSphConfigSection hIndex;
tIndex.Save(hIndex);
ESphAddIndex eAdd = ConfigureAndPreloadIndex ( hIndex, tIndex.m_sName.cstr(), dWarnings, sError );
if ( eAdd==ADD_ERROR )
dWarnings.Add ( "disabled at the JSON config" );
return eAdd;
}
// load indexes obtained from the JSON config during daemon index preload (part of the ConfigureAndPreload work is done here)
// ServiceMain -> ConfigureAndPreload -> ConfigureAndPreloadConfiglessIndexes
void ConfigureAndPreloadConfiglessIndexes ( int & iValidIndexes, int & iCounter ) REQUIRES ( MainThread )
{
// assume g_dCfgIndexes lists all locals first, then all distributed. Otherwise, a distributed index with not-yet-visible local agents will fail to load!
assert ( IsConfigless() );
SccRL_t tCfgRLock { g_tCfgIndexesLock };
for ( const IndexDesc_t & tIndex : g_dCfgIndexes )
{
CSphString sError;
StrVec_t dWarnings;
ESphAddIndex eAdd = ConfiglessPreloadIndex ( tIndex, dWarnings, sError );
iValidIndexes += ( eAdd==ADD_ERROR ? 0 : 1 );
iCounter += ( eAdd== ADD_NEEDLOAD ? 1 : 0 );
for ( const auto & i : dWarnings )
sphWarning ( "table '%s': %s", tIndex.m_sName.cstr(), i.cstr() );
if ( eAdd==ADD_ERROR )
sphWarning ( "table '%s': %s - NOT SERVING", tIndex.m_sName.cstr(), sError.cstr() );
}
}
static bool HasConfigLocal ( const CSphString & sName )
{
SccRL_t tCfgRLock { g_tCfgIndexesLock };
return g_dCfgIndexes.Contains ( bind ( &IndexDesc_t::m_sName ), sName );
}
// collect local indexes that should be saved into JSON config
static void CollectLocalIndexesInt ( CSphVector<IndexDesc_t> & dIndexes )
{
if ( !IsConfigless() )
return;
assert ( g_pLocalIndexes );
ServedSnap_t hLocals = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocals )
{
assert ( tIt.second );
IndexDesc_t & tIndex = dIndexes.Add();
tIndex.m_sName = tIt.first;
tIndex.m_sPath = tIt.second->m_sIndexPath;
tIndex.m_eType = tIt.second->m_eType;
}
SmallStringHash_T<IndexDesc_t*> hConfigLocal;
SccRL_t tCfgRLock { g_tCfgIndexesLock };
for_each ( g_dCfgIndexes, [&hConfigLocal] ( IndexDesc_t& tDesc ) { if (tDesc.m_eType!=IndexType_e::DISTR) hConfigLocal.Add ( &tDesc, tDesc.m_sName ); } );
for_each ( *hLocals, [&hConfigLocal] ( auto& tIt ) { hConfigLocal.Delete ( tIt.first ); } );
// keep indexes loaded from JSON but disabled due to errors
for_each ( hConfigLocal, [&dIndexes] ( auto& tIt ) { assert ( tIt.second); dIndexes.Add ( *tIt.second ); } );
}
// collect distributed indexes that should be saved into JSON config
static void CollectDistIndexesInt ( CSphVector<IndexDesc_t> & dIndexes )
{
if ( !IsConfigless() )
return;
assert ( g_pDistIndexes );
auto pDistSnapshot = g_pDistIndexes->GetHash();
for ( auto& tIt : *pDistSnapshot )
{
IndexDesc_t & tIndex = dIndexes.Add();
tIndex.m_sName = tIt.first;
tIndex.m_eType = IndexType_e::DISTR;
tIndex.m_tDistr = GetDistributedDesc ( *tIt.second );
}
}
std::unique_ptr<FilenameBuilder_i> CreateFilenameBuilder ( const char * szIndex )
{
if ( IsConfigless() )
return std::make_unique<FilenameBuilder_c>(szIndex);
return nullptr;
}
static bool SetupConfiglessMode ( const CSphConfig & hConf, const CSphString & sConfigFile, CSphString & sError )
{
const CSphConfigSection & hSearchd = hConf["searchd"]["searchd"];
if ( !hSearchd.Exists("data_dir") )
return false;
g_sDataDir = hSearchd["data_dir"].strval();
if ( !sphDirExists ( g_sDataDir.cstr(), &sError ) )
{
sError.SetSprintf ( "%s; make sure it is accessible or remove data_dir from the config file", sError.cstr() );
return false;
}
if ( hConf.Exists("index") )
{
sError.SetSprintf ( "'data_dir' cannot be mixed with table declarations in '%s'", sConfigFile.cstr() );
return false;
}
if ( hConf.Exists("source") )
{
sError.SetSprintf ( "'data_dir' cannot be mixed with source declarations in '%s'", sConfigFile.cstr() );
return false;
}
SetIndexFilenameBuilder ( CreateFilenameBuilder );
return true;
}
static const char * g_sJsonConfName = "manticore.json";
// load data from JSON config on daemon start
bool LoadConfigInt ( const CSphConfig & hConf, const CSphString & sConfigFile, CSphString & sError ) REQUIRES (MainThread)
{
const CSphConfigSection & hSearchd = hConf["searchd"]["searchd"];
g_sLogFile = hSearchd.GetStr ( "log", "" );
g_bConfigless = hSearchd.Exists("data_dir");
if ( !g_bConfigless )
return true;
if ( !SetupConfiglessMode ( hConf, sConfigFile, sError ) )
return false;
// a node with empty incoming addresses works as GARB - does not affect FC;
// it might hang on pushing 1500 transactions
ReplicationSetIncoming ( hSearchd.GetStr ( "node_address" ) );
// check data_dir exists and could write there
if ( !CheckPath ( g_sDataDir, true, sError ) )
return false;
g_sConfigPath.SetSprintf ( "%s/%s", g_sDataDir.cstr(), g_sJsonConfName );
// check that file is readable in case it exists
if ( sphFileExists ( g_sConfigPath.cstr(), nullptr ) && !sphIsReadable ( g_sConfigPath.cstr(), &sError ) )
{
sError.SetSprintf ( "failed to use JSON config %s: %s", g_sConfigPath.cstr(), sError.cstr() );
return false;
}
SccWL_t tCfgWLock { g_tCfgIndexesLock };
// reset index and cluster descs on restart (watchdog resurrect)
g_dCfgClusters.Reset();
g_dCfgIndexes.Reset();
if ( !ConfigRead ( g_sConfigPath, g_dCfgClusters, g_dCfgIndexes, sError ) )
{
sError.SetSprintf ( "failed to use JSON config %s: %s", g_sConfigPath.cstr(), sError.cstr() );
return false;
}
return true;
}
bool SaveConfigInt ( CSphString & sError )
{
return Threads::CallCoroutineRes ( [&sError]
{
ScopedCoroMutex_t tSaving ( g_tSaveInProgress );
if ( !ReplicationEnabled() && !IsConfigless() )
return true;
// save clusters and their indexes into JSON config on daemon shutdown
auto dClusters = ReplicationCollectClusters ();
CSphVector<IndexDesc_t> dIndexes;
CollectLocalIndexesInt ( dIndexes );
CollectDistIndexesInt ( dIndexes );
if ( !ConfigWrite ( g_sConfigPath, dClusters, dIndexes, sError ) )
{
sphWarning ( "%s", TlsMsg::szError() );
return false;
}
return true;
});
}
//////////////////////////////////////////////////////////////////////////
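// create the directory for a new index (or validate that an existing one is empty;
// a single leftover .lock file does not count) and build the index path prefix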
static bool PrepareDirForNewIndex ( CSphString & sPath, CSphString & sIndexPath, const CSphString & sIndexName, CSphString & sError )
{
CSphString sNewPath = GetPathForNewIndex(sIndexName);
StringBuilder_c sRes;
sRes << sNewPath;
if ( sphDirExists ( sRes.cstr() ) )
{
StringBuilder_c sMask;
sMask << sRes.cstr() << "/*";
StrVec_t dFiles = FindFiles ( sMask.cstr() );
if ( dFiles.GetLength() )
{
bool bLockFileLeft = dFiles.GetLength()==1 && dFiles[0].Ends (".lock");
if ( !bLockFileLeft )
{
sError.SetSprintf ( "directory is not empty: %s", sRes.cstr() );
return false;
}
}
}
else if ( !MkDir ( sRes.cstr() ) )
{
sError.SetSprintf ( "can't create directory: %s", sRes.cstr() );
return false;
}
sRes << "/";
sPath = sRes.cstr();
sRes << sIndexName;
sIndexPath = sRes.cstr();
return true;
}
using OptInt_t = std::optional<int>;
using CopiedFiles = SmallStringHash_T<CSphSavedFile>;
static bool CopyExternalFile ( const CSphString & sSrcPath, const CSphString & sDstPath, const char * sDstName, OptInt_t iPostfix, OptInt_t iChunk, CSphSavedFile & tExternal, CopiedFiles & hCopied, CSphString & sError )
{
CSphString sFromFile = tExternal.m_sFilename;
const CSphSavedFile * pDstExternal = hCopied ( sFromFile );
if ( pDstExternal )
{
tExternal = *pDstExternal;
return true;
}
assert ( sDstName && !sSrcPath.IsEmpty() && !sDstPath.IsEmpty() && !tExternal.m_sFilename.IsEmpty() );
StringBuilder_c sFile;
sFile << sDstName;
if ( iPostfix.has_value() )
sFile.Appendf ( "_chunk%d", iPostfix.value() );
if ( iChunk.has_value() )
sFile.Appendf ( "_%d", iChunk.value() );
sFile << ".txt";
CSphString sDst;
sDst.SetSprintf ( "%s%s", sDstPath.cstr(), sFile.cstr() );
CSphString sSrc;
if ( !IsPathAbsolute ( tExternal.m_sFilename ) )
sSrc.SetSprintf ( "%s%s", sSrcPath.cstr(), tExternal.m_sFilename.cstr() );
else
sSrc = tExternal.m_sFilename;
if ( !CopyFile ( sSrc.cstr(), sDst.cstr(), sError ) )
return false;
if ( !tExternal.Collect ( sDst.cstr(), &sError ) )
return false;
tExternal.m_sFilename = sFile.cstr();
hCopied.Add ( tExternal, sFromFile );
return true;
}
static JsonObj_c DumpFileInfoWoName ( const CSphSavedFile & tInfo )
{
JsonObj_c tInfoDump;
tInfoDump.AddUint ( "size", tInfo.m_uSize );
tInfoDump.AddUint ( "ctime", tInfo.m_uCTime );
tInfoDump.AddUint ( "mtime", tInfo.m_uMTime );
tInfoDump.AddUint ( "crc32", tInfo.m_uCRC32 );
return tInfoDump;
}
static JsonObj_c DumpFileInfo ( const CSphSavedFile & tInfo )
{
JsonObj_c tFileDump;
tFileDump.AddStr ( "name", tInfo.m_sFilename );
JsonObj_c tInfoDump;
tInfoDump.AddUint ( "size", tInfo.m_uSize );
tInfoDump.AddUint ( "ctime", tInfo.m_uCTime );
tInfoDump.AddUint ( "mtime", tInfo.m_uMTime );
tInfoDump.AddUint ( "crc32", tInfo.m_uCRC32 );
tFileDump.AddItem ( "info", tInfoDump );
return tFileDump;
}
static bool CopyExceptions ( const CSphString & sSrcPath, const CSphString & sDstPath, OptInt_t iPostfix, JsonObj_c & tTokSettings, CopiedFiles & hCopied, CSphString & sError )
{
assert ( tTokSettings && tTokSettings.HasItem( "synonyms_file" ) && tTokSettings.GetItem( "synonyms_file" ).IsStr() );
CSphSavedFile tExceptions;
tExceptions.m_sFilename = tTokSettings.GetItem( "synonyms_file" ).StrVal();
if ( !CopyExternalFile ( sSrcPath, sDstPath, "exceptions", iPostfix, std::nullopt, tExceptions, hCopied, sError ) )
return false;
tTokSettings.DelItem ( "synonyms_file" );
tTokSettings.DelItem ( "syn_file_info" );
tTokSettings.AddStr ( "synonyms_file", tExceptions.m_sFilename );
JsonObj_c tFileInfo = DumpFileInfoWoName ( tExceptions );
tTokSettings.AddItem ( "syn_file_info", tFileInfo );
return true;
}
// copy multiple externals from the array object
static std::optional<JsonObj_c> CopyExternalFilesArray ( const CSphString & sSrcPath, const CSphString & sDstPath, OptInt_t iPostfix, const JsonObj_c & tInfos, const char * sItemName, CopiedFiles & hCopied, CSphString & sError)
{
JsonObj_c tResArray ( true );
for ( int i=0; i<tInfos.Size(); i++ )
{
JsonObj_c tItemName = tInfos[i].GetItem ( "name" );
if ( !tItemName || !tItemName.IsStr() )
{
sError.SetSprintf ( "invalid %s", sItemName );
return std::nullopt;
}
CSphSavedFile tFileInfo;
tFileInfo.m_sFilename = tItemName.StrVal();
if ( !CopyExternalFile ( sSrcPath, sDstPath, sItemName, iPostfix, i, tFileInfo, hCopied, sError ) )
return std::nullopt;
JsonObj_c tFileItem = DumpFileInfo ( tFileInfo );
tResArray.AddItem ( tFileItem );
}
return tResArray;
}
static bool CopyWordforms ( const CSphString & sSrcPath, const CSphString & sDstPath, OptInt_t iPostfix, JsonObj_c & tDictSettings, CopiedFiles & hCopied, CSphString & sError )
{
assert ( tDictSettings && tDictSettings.HasItem( "wordforms_file_infos" ) && tDictSettings.GetItem( "wordforms_file_infos" ).IsArray() && tDictSettings.GetItem( "wordforms_file_infos" ).Size()>0 );
auto tWfDst = CopyExternalFilesArray ( sSrcPath, sDstPath, iPostfix, tDictSettings.GetItem( "wordforms_file_infos" ), "wordforms", hCopied, sError );
if ( !tWfDst.has_value() )
return false;
tDictSettings.DelItem ( "wordforms_file_infos" );
tDictSettings.AddItem ( "wordforms_file_infos", tWfDst.value() );
return true;
}
static bool CopyStopwords ( const CSphString & sSrcPath, const CSphString & sDstPath, OptInt_t iPostfix, JsonObj_c & tDictSettings, CopiedFiles & hCopied, CSphString & sError )
{
assert ( tDictSettings );
// could be just stopwords without stopwords_file_infos
if ( tDictSettings && !tDictSettings.HasItem( "stopwords_file_infos" ) )
{
assert ( tDictSettings.HasItem( "stopwords" ) && tDictSettings.GetItem( "stopwords" ).IsStr() );
CSphString sStopwords = tDictSettings.GetItem( "stopwords" ).StrVal();
StrVec_t dStops = sphSplit ( sStopwords.cstr(), sStopwords.Length(), " \t," );
JsonObj_c tStopwordsInfo ( true );
for ( const CSphString & sFile : dStops )
{
JsonObj_c tInfo;
tInfo.AddStr ( "name", sFile );
tStopwordsInfo.AddItem ( tInfo );
};
tDictSettings.AddItem ( "stopwords_file_infos", tStopwordsInfo );
}
assert ( tDictSettings.HasItem( "stopwords_file_infos" ) && tDictSettings.GetItem( "stopwords_file_infos" ).IsArray() && tDictSettings.GetItem( "stopwords_file_infos" ).Size()>0 );
auto tStopsDst = CopyExternalFilesArray ( sSrcPath, sDstPath, iPostfix, tDictSettings.GetItem( "stopwords_file_infos" ), "stopwords", hCopied, sError );
if ( !tStopsDst.has_value() )
return false;
StringBuilder_c sStopwords ( " " );
for ( const JsonObj_c tFile : tStopsDst.value() )
{
assert ( tFile.HasItem ( "name" ) && tFile.GetItem ( "name" ).IsStr() );
sStopwords << tFile.GetItem ( "name" ).SzVal();
}
tDictSettings.DelItem ( "stopwords" );
tDictSettings.AddStr ( "stopwords", sStopwords.cstr() );
tDictSettings.DelItem ( "stopwords_file_infos" );
tDictSettings.AddItem ( "stopwords_file_infos", tStopsDst.value() );
return true;
}
static bool CopyJiebaDict ( const CSphString & sSrcPath, const CSphString & sDstPath, OptInt_t iPostfix, JsonObj_c & tHeader, CopiedFiles & hCopied, CSphString & sError )
{
const char * szDict = "jieba_user_dict_path";
assert ( tHeader && tHeader.HasItem(szDict) && tHeader.GetItem(szDict).IsStr() );
CSphSavedFile tJiebaDict;
tJiebaDict.m_sFilename = tHeader.GetItem(szDict).StrVal();
if ( !CopyExternalFile ( sSrcPath, sDstPath, szDict, iPostfix, std::nullopt, tJiebaDict, hCopied, sError ) )
return false;
tHeader.DelItem(szDict);
tHeader.AddStr ( szDict, tJiebaDict.m_sFilename );
return true;
}
static std::optional<JsonObj_c> ReadJsonHeader ( const CSphString & sFilename, CSphString & sError )
{
CSphAutofile tFile;
if ( tFile.Open ( sFilename, SPH_O_READ, sError )<0 )
return std::nullopt;
int64_t iSize = tFile.GetSize();
CSphFixedVector<char> sMeta { iSize + 2 }; // and zero-gap at the end
if ( !tFile.Read ( sMeta.Begin(), iSize, sError ) )
return std::nullopt;
JsonObj_c tMeta ( sMeta );
if ( tMeta.GetError ( sMeta.Begin(), iSize, sError ) )
return std::nullopt;
return tMeta;
}
static bool WriteJsonHeader ( const CSphString & sFilename, const JsonObj_c & tMeta, CSphString & sError )
{
CSphAutofile tFile;
if ( tFile.Open ( sFilename, SPH_O_NEW, sError, true )<0 )
return false;
CSphString sDump = tMeta.AsString ( true );
if ( !sphWriteThrottled ( tFile.GetFD(), sDump.cstr(), sDump.Length(), sFilename.cstr(), sError ) )
return false;
tFile.SetPersistent();
return true;
}
// check for any stopwords, wordforms or exceptions and copy all available
static bool CopyExternalsFromHeader ( const CSphString & sSrcPath, const CSphString & sDstIndex, OptInt_t iPostfix, JsonObj_c & tHeader, CopiedFiles & hCopied, CSphString & sError )
{
JsonObj_c tTokSettings = tHeader.GetItem ( "tokenizer_settings" );
JsonObj_c tDictSettings = tHeader.GetItem ( "dictionary_settings" );
bool bHasExceptions = tTokSettings && tTokSettings.HasItem( "synonyms_file" ) && tTokSettings.GetItem( "synonyms_file" ).IsStr();
bool bHasStopwords = false;
if ( tDictSettings && tDictSettings.HasItem( "stopwords_file_infos" ) )
{
JsonObj_c tStopwords = tDictSettings.GetItem( "stopwords_file_infos" );
bHasStopwords = ( tStopwords.IsArray() && tStopwords.Size()>0 );
}
if ( !bHasStopwords && tDictSettings.HasItem( "stopwords" ) )
bHasStopwords = tDictSettings.GetItem( "stopwords" ).IsStr();
bool bHasWordforms = false;
if ( tDictSettings && tDictSettings.HasItem( "wordforms_file_infos" ) )
{
JsonObj_c tWordforms = tDictSettings.GetItem( "wordforms_file_infos" );
bHasWordforms = ( tWordforms.IsArray() && tWordforms.Size()>0 );
}
bool bHasJiebaDict = tHeader.HasItem("jieba_user_dict_path");
if ( !bHasExceptions && !bHasStopwords && !bHasWordforms && !bHasJiebaDict )
return true;
CSphString sDstPath = GetPathOnly ( sDstIndex );
if ( bHasExceptions && !CopyExceptions ( GetPathOnly ( sSrcPath ), sDstPath, iPostfix, tTokSettings, hCopied, sError ) )
return false;
if ( bHasStopwords && !CopyStopwords ( GetPathOnly ( sSrcPath ), sDstPath, iPostfix, tDictSettings, hCopied, sError ) )
return false;
if ( bHasWordforms && !CopyWordforms ( GetPathOnly ( sSrcPath ), sDstPath, iPostfix, tDictSettings, hCopied, sError ) )
return false;
if ( bHasJiebaDict && !CopyJiebaDict ( GetPathOnly ( sSrcPath ), sDstPath, iPostfix, tHeader, hCopied, sError ) )
return false;
return true;
}
// copy external files from either .meta or .sph
static bool CopyExternal ( const CSphString & sSrcPath, const CSphString & sDstIndex, const CSphString & sHeaderName, OptInt_t iPostfix, CopiedFiles & hCopied, CSphString & sError )
{
std::optional<JsonObj_c> tRes = ReadJsonHeader ( sHeaderName, sError );
if ( !tRes )
return false;
JsonObj_c & tMeta = tRes.value();
// copy external files from the RAM part
if ( !CopyExternalsFromHeader ( sSrcPath, sDstIndex, iPostfix, tMeta, hCopied, sError ) )
return false;
// copy external files from the disk chunks
if ( tMeta.HasItem ( "chunk_names" ) && tMeta.GetItem ( "chunk_names" ).IsArray() && tMeta.GetItem ( "chunk_names" ).Size()>0 )
{
JsonObj_c tChunks = tMeta.GetItem ( "chunk_names" );
for ( const JsonObj_c & tChunk : tChunks )
{
if ( !tChunk.IsInt() )
{
sError.SetSprintf ( "invalid chunk: %s", tChunk.AsString().cstr() );
return false;
}
int iChunk = (int)tChunk.IntVal();
CSphString sChunkName;
sChunkName.SetSprintf ( "%s.%d.sph", sDstIndex.cstr(), iChunk );
if ( !CopyExternal ( sSrcPath, sDstIndex, sChunkName, iChunk, hCopied, sError ) )
return false;
}
}
if ( !WriteJsonHeader ( sHeaderName, tMeta, sError ) )
return false;
return true;
}
// remove index_id from the .meta to prevent duplicates of active indexes
static bool CleanupHeader ( const CSphString & sHeaderName, bool & bPQ, CSphString & sError )
{
std::optional<JsonObj_c> tRes = ReadJsonHeader ( sHeaderName, sError );
if ( !tRes )
return false;
JsonObj_c & tMeta = tRes.value();
if ( tMeta.HasItem ( "index_id" ) )
tMeta.DelItem ( "index_id" );
bPQ = tMeta.HasItem ( "pqs" );
if ( !WriteJsonHeader ( sHeaderName, tMeta, sError ) )
return false;
return true;
}
bool CopyIndexFiles ( const CSphString & sIndex, const CSphString & sPathToIndex, bool & bPQ, StrVec_t & dWarnings, CSphString & sError )
{
CSphString sPath, sNewIndexPath;
if ( !PrepareDirForNewIndex ( sPath, sNewIndexPath, sIndex, sError ) )
return false;
StrVec_t dWipe;
CopiedFiles hCopied;
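// scope guard: on any early return below it wipes everything copied so far;
// the dWipe.Reset() / hCopied.Reset() calls at the end disarm it on success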
auto tCleanup = AtScopeExit ( [&dWipe, &hCopied, &sNewIndexPath]
{
dWipe.for_each ( [] ( const auto& i ) { unlink ( i.cstr() ); } );
for ( const auto & tItem : hCopied )
{
CSphString sName;
sName.SetSprintf ( "%s%s", sNewIndexPath.cstr(), tItem.second.m_sFilename.cstr() );
unlink ( sName.cstr() );
};
});
CSphString sFind;
sFind.SetSprintf ( "%s.*", sPathToIndex.cstr() );
StrVec_t dFoundFiles = FindFiles ( sFind.cstr(), false );
// checks for source index
if ( !dFoundFiles.GetLength() )
{
sError = "no table files found";
return false;
}
if ( !dFoundFiles.any_of ( [] ( const CSphString & sFile ) { return sFile.Ends ( ".meta" ); } ) )
{
sError.SetSprintf ( "missing %s.meta table file", sPathToIndex.cstr() );
return false;
}
for ( const auto & i : dFoundFiles )
{
CSphString sDest;
const char * sExt = GetExtension(i);
if ( !sExt || StrEq ( sExt, "spl" ) || StrEq ( sExt, "lock" ) )
continue;
sDest.SetSprintf ( "%s.%s", sNewIndexPath.cstr(), sExt );
if ( !CopyFile ( i, sDest, sError ) )
return false;
dWipe.Add(sDest);
}
CSphString sMetaName;
sMetaName.SetSprintf ( "%s.meta", sNewIndexPath.cstr() );
if ( !CopyExternal ( sPathToIndex, sNewIndexPath, sMetaName, std::nullopt, hCopied, sError ) )
return false;
if ( !CleanupHeader ( sMetaName, bPQ, sError ) )
return false;
dWipe.Reset();
hCopied.Reset();
return true;
}
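// validate CREATE TABLE options: reject settings the daemon manages itself
// (path, storage internals, schema attributes) and unknown table types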
static bool CheckCreateTableSettings ( const CreateTableSettings_t & tCreateTable, CSphString & sError )
{
static const char * dForbidden[] = { "path", "stored_fields", "stored_only_fields", "columnar_attrs", "columnar_no_fast_fetch", "rowwise_attrs", "rt_field", "embedded_limit", "knn", "json_secondary_indexes" };
static const char * dTypes[] = { "rt", "pq", "percolate", "distributed" };
for ( const auto & i : tCreateTable.m_dOpts )
{
for ( const auto & j : dForbidden )
if ( i.m_sName==j )
{
sError.SetSprintf ( "setting not allowed: %s='%s'", i.m_sName.cstr(), i.m_sValue.cstr() );
return false;
}
for ( int j = 0; j < GetNumRtTypes(); j++ )
if ( i.m_sName==GetRtType(j).m_szName )
{
sError.SetSprintf ( "setting not allowed: %s='%s'", i.m_sName.cstr(), i.m_sValue.cstr() );
return false;
}
if ( i.m_sName=="type" )
{
bool bFound = false;
for ( const auto & j : dTypes )
bFound |= i.m_sValue==j;
if ( !bFound )
{
sError.SetSprintf ( "setting not allowed: %s='%s'", i.m_sName.cstr(), i.m_sValue.cstr() );
return false;
}
}
}
return true;
}
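// render a distributed index back into a CREATE TABLE statement,
// emitting only the options that differ from the defaults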
CSphString BuildCreateTableDistr ( const CSphString & sName, const DistributedIndex_t & tDistr )
{
StringBuilder_c sRes(" ");
sRes << "CREATE TABLE" << sName << "type='distributed'";
for ( const auto & i : tDistr.m_dLocal )
{
CSphString sLocal;
sRes << sLocal.SetSprintf ( "local='%s'", i.cstr() );
}
for ( const auto& i : tDistr.m_dAgents )
{
CSphString sAgent;
if ( !i || !i->GetLength() )
continue;
if ( (*i)[0].m_bBlackhole )
sAgent = "agent_blackhole";
else if ( (*i)[0].m_bPersistent )
sAgent = "agent_persistent";
else
sAgent = "agent";
sRes << sAgent.SetSprintf ( "%s='%s'", sAgent.cstr(), i->GetConfigStr().cstr() );
}
DistributedIndexRefPtr_t pDefault ( new DistributedIndex_t );
CSphString sOpt;
if ( tDistr.GetAgentConnectTimeoutMs ( true )!=pDefault->GetAgentConnectTimeoutMs ( true ) )
sRes << sOpt.SetSprintf ( "agent_connect_timeout='%d'", tDistr.GetAgentConnectTimeoutMs ( true ) );
if ( tDistr.GetAgentQueryTimeoutMs ( true )!=pDefault->GetAgentQueryTimeoutMs ( true ) )
sRes << sOpt.SetSprintf ( "agent_query_timeout='%d'", tDistr.GetAgentQueryTimeoutMs ( true ) );
if ( tDistr.m_iAgentRetryCount!=pDefault->m_iAgentRetryCount )
sRes << sOpt.SetSprintf ( "agent_retry_count='%d'", tDistr.m_iAgentRetryCount );
if ( tDistr.m_bDivideRemoteRanges!=pDefault->m_bDivideRemoteRanges )
sRes << sOpt.SetSprintf ( "divide_remote_ranges='%d'", tDistr.m_bDivideRemoteRanges ? 1 : 0 );
if ( tDistr.m_eHaStrategy!=pDefault->m_eHaStrategy )
sRes << sOpt.SetSprintf ( "ha_strategy='%s'", HAStrategyToStr ( tDistr.m_eHaStrategy ).cstr() );
return sRes.cstr();
}
static void DeleteExtraIndexFiles ( CSphIndex * pIndex, const StrVec_t * pExtFiles )
{
assert(pIndex);
CSphString sTmp;
CSphString sPath = GetPathForNewIndex ( pIndex->GetName() );
auto pTokenizer = pIndex->GetTokenizer();
auto pDict = pIndex->GetDictionary();
if ( pTokenizer )
{
// single file
const CSphString & sExceptions = pTokenizer->GetSettings().m_sSynonymsFile;
if ( sExceptions.Length() )
{
sTmp.SetSprintf ( "%s/%s", sPath.cstr(), sExceptions.cstr() );
::unlink ( sTmp.cstr() );
}
}
if ( pDict )
{
// space-separated string
const CSphString & sStopwords = pDict->GetSettings().m_sStopwords;
StrVec_t dStops = sphSplit ( sStopwords.cstr(), sStopwords.Length(), " \t," );
for ( const auto & i : dStops )
{
sTmp.SetSprintf ( "%s/%s", sPath.cstr(), i.cstr() );
::unlink ( sTmp.cstr() );
}
// array
for ( const auto & i : pDict->GetSettings().m_dWordforms )
{
sTmp.SetSprintf ( "%s/%s", sPath.cstr(), i.cstr() );
::unlink ( sTmp.cstr() );
}
}
// also remove external files from disk chunks (their list of files can differ from the RT index itself)
if ( pExtFiles )
{
for ( const auto & sName : *pExtFiles )
{
if ( sphIsReadable ( sName ) )
::unlink ( sName.cstr() );
}
}
}
static void DeleteRtIndex ( CSphIndex * pIdx, const StrVec_t * pExtFiles )
{
assert ( IsConfigless() );
if ( !pIdx->IsRT() && !pIdx->IsPQ() )
return;
auto pRt = static_cast<RtIndex_i*> ( pIdx );
pRt->IndexDeleted();
DeleteExtraIndexFiles ( pRt, pExtFiles );
}
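// align a freshly loaded RT/PQ index's TID with the binlog's last known TID;
// an index with m_iTID==-1 is left untouched (apparently: no TID tracking)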
static void FixupIndexTID ( CSphIndex * pIdx, int64_t iTID )
{
assert ( IsConfigless () );
if ( pIdx && ( pIdx->IsRT () || pIdx->IsPQ () ) && pIdx->m_iTID!=-1 )
pIdx->m_iTID = iTID;
}
static void RemoveAndDeleteRtIndex ( const CSphString& sIndex )
{
assert ( IsConfigless() );
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
if ( !pServed )
return;
WIdx_c pIdx { pServed };
DeleteRtIndex ( pIdx, nullptr );
g_pLocalIndexes->Delete ( sIndex );
}
bool CreateNewIndexConfigless ( const CSphString & sIndex, const CreateTableSettings_t & tCreateTable, StrVec_t & dWarnings, CSphString & sError )
{
assert ( IsConfigless() );
if ( tCreateTable.m_bIfNotExists && IndexIsServed ( sIndex ) )
return true;
if ( !CheckCreateTableSettings ( tCreateTable, sError ) )
return false;
std::unique_ptr<IndexSettingsContainer_i> pSettingsContainer { CreateIndexSettingsContainer() };
if ( !pSettingsContainer->Populate ( tCreateTable, true ) )
{
sError = pSettingsContainer->GetError();
return false;
}
if ( pSettingsContainer->Get ( "type" )!="distributed")
{
CSphString sPath, sIndexPath;
if ( !PrepareDirForNewIndex ( sPath, sIndexPath, sIndex, sError ) )
{
if ( HasConfigLocal ( sIndex ) )
sError.SetSprintf ( "%s (the table may be corrupted, refer to Manticore log)", sError.cstr() );
return false;
}
pSettingsContainer->Add ( "path", sIndexPath );
if ( !pSettingsContainer->CopyExternalFiles ( sPath, 0 ) )
{
sError = pSettingsContainer->GetError();
return false;
}
}
const CSphConfigSection & hCfg = pSettingsContainer->AsCfg();
ESphAddIndex eAdd;
ServedIndexRefPtr_c pDesc;
std::tie ( eAdd, pDesc ) = AddIndex ( sIndex.cstr(), hCfg, true, true, &dWarnings, sError );
switch ( eAdd )
{
case ADD_ERROR: return false;
case ADD_NEEDLOAD:
{
assert ( pDesc );
if ( !pDesc->m_sGlobalIDFPath.IsEmpty() && !sph::PrereadGlobalIDF ( pDesc->m_sGlobalIDFPath, sError ) )
dWarnings.Add ( "global IDF unavailable - IGNORING" );
FixupIndexTID ( UnlockedHazardIdxFromServed ( *pDesc ), Binlog::LastTidFor ( sIndex ) );
if ( !PreallocNewIndex ( *pDesc, &hCfg, sIndex.cstr(), dWarnings, sError ) )
{
DeleteRtIndex ( UnlockedHazardIdxFromServed ( *pDesc ), nullptr );
return false;
}
}
// no break
case ADD_SERVED:
g_pLocalIndexes->Add ( pDesc, sIndex );
case ADD_DISTR:
default:
break;
}
if ( SaveConfigInt ( sError ) )
{
pSettingsContainer->ResetCleanup();
return true;
}
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
if ( pServed )
RemoveAndDeleteRtIndex ( sIndex );
else
g_pDistIndexes->Delete ( sIndex );
return false;
}
class ScopedCleanup_c
{
public:
explicit ScopedCleanup_c ( CSphString sIndex )
: m_sIndex ( std::move ( sIndex ) )
{}
void Ok()
{
m_bOk = true;
}
~ScopedCleanup_c()
{
if ( m_bOk )
return;
RemoveAndDeleteRtIndex ( m_sIndex );
}
private:
CSphString m_sIndex;
bool m_bOk = false;
};
// ClientSession_c::Execute -> HandleMysqlImportTable -> AddExistingIndexConfigless
bool AddExistingIndexConfigless ( const CSphString & sIndex, IndexType_e eType, StrVec_t & dWarnings, CSphString & sError )
{
assert ( IsConfigless() );
ScopedCleanup_c tCleanup ( sIndex );
IndexDesc_t tNewIndex;
tNewIndex.m_eType = eType;
tNewIndex.m_sName = sIndex;
tNewIndex.m_sPath.SetSprintf ( "%s/%s", GetPathForNewIndex(sIndex).cstr(), sIndex.cstr() );
if ( ConfiglessPreloadIndex ( tNewIndex, dWarnings, sError )!= ADD_NEEDLOAD )
return false;
if ( !SaveConfigInt ( sError ) )
return false;
tCleanup.Ok();
return true;
}
static bool DropDistrIndex ( const CSphString & sIndex, CSphString & sError )
{
assert ( IsConfigless() );
auto pDistr = GetDistr(sIndex);
if ( !pDistr )
{
sError.SetSprintf ( "DROP TABLE failed: unknown distributed table '%s'", sIndex.cstr() );
return false;
}
if ( !pDistr->m_sCluster.IsEmpty() )
{
sError.SetSprintf ( "DROP TABLE failed: unable to drop a cluster table '%s'", sIndex.cstr() );
return false;
}
g_pDistIndexes->Delete(sIndex);
return true;
}
static void RemoveConfigIndex ( const CSphString & sIndex )
{
g_pLocalIndexes->Delete ( sIndex );
// also remove the index from the list of indexes in the JSON config
SccWL_t tCfgWLock { g_tCfgIndexesLock };
int iIdx = g_dCfgIndexes.GetFirst ( [&] ( const IndexDesc_t & tIdx ) { return tIdx.m_sName==sIndex; } );
if ( iIdx>=0 )
g_dCfgIndexes.Remove ( iIdx );
}
static sph::StringSet g_dAllowedExt = { "meta", "ram", "settings", "lock"};
static bool ReportEmptyDir ( const CSphString & sIndexName, CSphString * pMsg )
{
if ( !pMsg )
return true;
CSphString sIndexPath = GetPathForNewIndex ( sIndexName );
CSphString sSearchPath;
sSearchPath.SetSprintf ( "%s/*", sIndexPath.cstr() );
StrVec_t dFiles = FindFiles ( sSearchPath.cstr(), false );
// no files in the index dir is ok
if ( dFiles.IsEmpty() )
return true;
// files that the RT index dtor removes later are ok to remain
bool bAllAllowed = dFiles.all_of ( [&] ( const CSphString & sFile )
{
const char * sExt = GetExtension ( sFile );
return g_dAllowedExt[sExt];
});
if ( bAllAllowed )
return true;
StringBuilder_c sFiles ( ", " );
dFiles.for_each ( [&] ( CSphString & sFile)
{
const char * sExt = GetExtension ( sFile );
if ( !g_dAllowedExt[sExt] )
sFiles += StripPath ( sFile ).cstr();
});
sphWarning ( "index %s directory '%s' is not empty after table drop, clean up files manually: %s", sIndexName.cstr(), sIndexPath.cstr(), sFiles.cstr() );
pMsg->SetSprintf ( "index %s directory '%s' is not empty after table drop, clean up files manually: %s", sIndexName.cstr(), sIndexPath.cstr(), sFiles.cstr() );
return false;
}
static bool DropLocalIndex ( const CSphString & sIndex, CSphString & sError, CSphString * pWarning )
{
assert ( IsConfigless() );
auto pServed = GetServed(sIndex);
if ( !pServed )
{
sError.SetSprintf ( "DROP TABLE failed: unknown local table '%s'", sIndex.cstr() );
return false;
}
if ( ServedDesc_t::IsCluster ( pServed ) )
{
sError.SetSprintf ( "DROP TABLE failed: unable to drop a cluster table '%s'", sIndex.cstr() );
return false;
}
// need to stop all long-running write operations on the index, as it will be dropped anyway
{
RIdx_T<RtIndex_i*> pRt ( pServed );
pRt->ProhibitSave();
}
// scope for index removal on exit
{
WIdx_T<RtIndex_i*> pRt { pServed };
if ( !pRt )
{
sError.SetSprintf ( "DROP TABLE failed: unknown local table '%s'", sIndex.cstr() );
return false;
}
// collect all external files prior to truncation, as disk chunks may have different external-file options
StrVec_t dIndexFiles;
StrVec_t dExtFiles;
pRt->GetIndexFiles ( dIndexFiles, dExtFiles );
if ( !pRt->Truncate(sError, RtIndex_i::DROP ) )
return false;
DeleteRtIndex ( pRt, &dExtFiles );
RemoveConfigIndex ( sIndex );
}
ReportEmptyDir ( sIndex, pWarning );
return true;
}
bool DropIndexInt ( const CSphString & sIndex, bool bIfExists, CSphString & sError, CSphString * pWarning )
{
assert ( IsConfigless() );
bool bLocal = GetServed ( sIndex );
bool bDistr = GetDistr ( sIndex );
if ( bDistr )
{
if ( !DropDistrIndex ( sIndex, sError ) )
return false;
}
else if ( bLocal )
{
if ( !DropLocalIndex ( sIndex, sError, pWarning ) )
return false;
}
else
{
if ( bIfExists )
return true;
sError.SetSprintf ( "DROP TABLE failed: unknown table '%s'", sIndex.cstr() );
return false;
}
// we are unable to roll back the drop at this point
if ( !SaveConfigInt(sError) )
{
sError.SetSprintf ( "DROP TABLE failed for table '%s': %s", sIndex.cstr(), sError.cstr() );
return false;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
RunIdx_e IndexIsServed ( const CSphString& sName )
{
if ( g_pLocalIndexes && g_pLocalIndexes->Contains ( sName ) )
return LOCAL;
if ( g_pDistIndexes && g_pDistIndexes->Contains ( sName ) )
return DISTR;
return NOTSERVED;
}
IndexDescDistr_t GetDistributedDesc ( const DistributedIndex_t & tDist )
{
IndexDescDistr_t tIndex;
tIndex.m_dLocals = tDist.m_dLocal;
tIndex.m_iAgentConnectTimeout = tDist.GetAgentConnectTimeoutMs ( true );
tIndex.m_iAgentQueryTimeout = tDist.GetAgentQueryTimeoutMs ( true );
tIndex.m_iAgentRetryCount = tDist.m_iAgentRetryCount;
tIndex.m_bDivideRemoteRanges = tDist.m_bDivideRemoteRanges;
tIndex.m_sHaStrategy = HAStrategyToStr ( tDist.m_eHaStrategy );
for ( const auto & tAgentIt : tDist.m_dAgents )
{
if ( !tAgentIt || !tAgentIt->GetLength() )
continue;
AgentConfigDesc_t & tAgent = tIndex.m_dAgents.Add();
tAgent.m_sConfig = tAgentIt->GetConfigStr();
tAgent.m_bBlackhole = (*tAgentIt)[0].m_bBlackhole;
tAgent.m_bPersistent = (*tAgentIt)[0].m_bPersistent;
}
return tIndex;
}
| 50,592 | C++ | .cpp | 1,342 | 35.194486 | 227 | 0.693331 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,879 | indextool.cpp | manticoresoftware_manticoresearch/src/indextool.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include "sphinxutils.h"
#include "sphinxint.h"
#include "fileutils.h"
#include "sphinxrt.h"
#include "killlist.h"
#include "docidlookup.h"
#include "indexfiles.h"
#include "stripper/html_stripper.h"
#include "tokenizer/charset_definition_parser.h"
#include "indexcheck.h"
#include "secondarylib.h"
#include "knnlib.h"
#include "detail/indexlink.h"
#include <ctime>
static CSphString g_sDataDir;
static bool g_bConfigless = false;
static bool IsConfigless()
{
return g_bConfigless;
}
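// Maps a bare table name to its directory under the data dir. For illustration
// (paths assumed): with g_sDataDir = "/var/lib/manticore", "products" resolves to
// "/var/lib/manticore/products"; an empty data_dir yields just the relative name.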
static CSphString GetPathForNewIndex ( const CSphString & sIndexName )
{
CSphString sRes;
if ( g_sDataDir.Length() && !g_sDataDir.Ends("/") && !g_sDataDir.Ends("\\") )
sRes.SetSprintf ( "%s/%s", g_sDataDir.cstr(), sIndexName.cstr() );
else
sRes.SetSprintf ( "%s%s", g_sDataDir.cstr(), sIndexName.cstr() );
return sRes;
}
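// A name containing '/' or '\\' is treated as an explicit path and left untouched;
// a bare name "idx" expands to "<data_dir>/idx/idx" (directory and file share the name).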
static void MakeRelativePath ( CSphString & sPath )
{
bool bAbsolute = strchr ( sPath.cstr(), '/' ) || strchr ( sPath.cstr(), '\\' );
if ( !bAbsolute )
sPath.SetSprintf ( "%s/%s/%s", g_sDataDir.cstr(), sPath.cstr(), sPath.cstr() );
}
class FilenameBuilder_c : public FilenameBuilder_i
{
public:
FilenameBuilder_c ( CSphString sIndex );
CSphString GetFullPath ( const CSphString & sName ) const final;
private:
const CSphString m_sIndex;
};
FilenameBuilder_c::FilenameBuilder_c ( CSphString sIndex )
: m_sIndex ( std::move ( sIndex ) )
{}
CSphString FilenameBuilder_c::GetFullPath ( const CSphString & sName ) const
{
if ( !IsConfigless() || !sName.Length() )
return sName;
CSphString sPath = GetPathForNewIndex ( m_sIndex );
StrVec_t dFiles;
StringBuilder_c sNewValue {" "};
// we assume that path has been stripped before
StrVec_t dValues = sphSplit ( sName.cstr(), sName.Length(), " \t," );
for ( auto & i : dValues )
{
if ( !i.Length() )
continue;
CSphString & sNew = dFiles.Add();
sNew.SetSprintf ( "%s/%s", sPath.cstr(), i.Trim().cstr() );
sNewValue << sNew;
}
return sNewValue.cstr();
}
static std::unique_ptr<FilenameBuilder_i> CreateFilenameBuilder ( const char * szIndex )
{
if ( IsConfigless() )
return std::make_unique<FilenameBuilder_c> ( szIndex );
return nullptr;
}
static void StripStdin ( const char * sIndexAttrs, const char * sRemoveElements )
{
CSphString sError;
CSphHTMLStripper tStripper ( true );
if ( !tStripper.SetIndexedAttrs ( sIndexAttrs, sError )
|| !tStripper.SetRemovedElements ( sRemoveElements, sError ) )
sphDie ( "failed to configure stripper: %s", sError.cstr() );
CSphVector<BYTE> dBuffer;
while ( !feof(stdin) )
{
char sBuffer[1024];
auto iLen = (int) fread ( sBuffer, 1, sizeof(sBuffer), stdin );
if ( !iLen )
break;
dBuffer.Append ((BYTE*)sBuffer, iLen);
}
dBuffer.Add ( 0 );
tStripper.Strip ( &dBuffer[0] );
fprintf ( stdout, "dumping stripped results...\n%s\n", &dBuffer[0] );
}
static void ApplyMorphology ( CSphIndex * pIndex )
{
CSphVector<BYTE> dInBuffer, dOutBuffer;
const int READ_BUFFER_SIZE = 1024;
dInBuffer.Reserve ( READ_BUFFER_SIZE );
char sBuffer[READ_BUFFER_SIZE];
while ( !feof(stdin) )
{
auto iLen = (int) fread ( sBuffer, 1, sizeof(sBuffer), stdin );
if ( !iLen )
break;
dInBuffer.Append ( sBuffer, iLen );
}
dInBuffer.Add(0);
dOutBuffer.Reserve ( dInBuffer.GetLength() );
TokenizerRefPtr_c pTokenizer = pIndex->GetTokenizer()->Clone ( SPH_CLONE_INDEX );
DictRefPtr_c pDict = pIndex->GetDictionary();
BYTE * sBufferToDump = &dInBuffer[0];
if ( pTokenizer )
{
pTokenizer->SetBuffer ( &dInBuffer[0], dInBuffer.GetLength() );
while ( BYTE * sToken = pTokenizer->GetToken() )
{
if ( pDict )
pDict->ApplyStemmers ( sToken );
auto iLen = (int) strlen ( (char *)sToken );
sToken[iLen] = ' ';
dOutBuffer.Append ( sToken, iLen+1 );
}
if ( dOutBuffer.GetLength() )
dOutBuffer[dOutBuffer.GetLength()-1] = 0;
else
dOutBuffer.Add(0);
sBufferToDump = &dOutBuffer[0];
}
fprintf ( stdout, "dumping stemmed results...\n%s\n", sBufferToDump );
}
static void CharsetFold ( CSphIndex * pIndex, FILE * fp )
{
CSphVector<BYTE> sBuf1 ( 16384 );
CSphVector<BYTE> sBuf2 ( 16384 );
const CSphLowercaser& tLC = pIndex->GetTokenizer()->GetLowercaser();
#if _WIN32
setmode ( fileno(stdout), O_BINARY );
#endif
int iBuf1 = 0; // how many leftover bytes from previous iteration
while ( !feof(fp) )
{
auto iGot = (int) fread ( sBuf1.Begin()+iBuf1, 1, sBuf1.GetLength()-iBuf1, fp );
if ( iGot<0 )
sphDie ( "read error: %s", strerrorm(errno) );
if ( iGot==0 && feof(fp) && iBuf1==0 )
break;
const BYTE * pIn = sBuf1.Begin();
const BYTE * pInMax = pIn + iBuf1 + iGot;
if ( pIn==pInMax && feof(fp) )
break;
// tricky bit
// on full buffer, and not an eof, terminate a bit early
// to avoid codepoint vs buffer boundary issue
if ( ( iBuf1+iGot )==sBuf1.GetLength() && iGot!=0 )
pInMax -= 16;
// do folding
BYTE * pOut = sBuf2.Begin();
BYTE * pOutMax = pOut + sBuf2.GetLength() - 16;
while ( pIn < pInMax )
{
int iCode = sphUTF8Decode ( pIn );
if ( iCode==0 )
pIn++; // decoder does not do that!
assert ( iCode>=0 );
if ( iCode!=0x09 && iCode!=0x0A && iCode!=0x0D )
{
iCode = tLC.ToLower ( iCode ) & 0xffffffUL;
if ( !iCode )
iCode = 0x20;
}
pOut += sphUTF8Encode ( pOut, iCode );
if ( pOut>=pOutMax )
{
fwrite ( sBuf2.Begin(), 1, pOut-sBuf2.Begin(), stdout );
pOut = sBuf2.Begin();
}
}
fwrite ( sBuf2.Begin(), 1, pOut-sBuf2.Begin(), stdout );
// now move around leftovers
BYTE * pRealEnd = sBuf1.Begin() + iBuf1 + iGot;
if ( pIn < pRealEnd )
{
iBuf1 = int ( pRealEnd - pIn );
memmove ( sBuf1.Begin(), pIn, iBuf1 );
}
}
}
#pragma pack(push,4)
struct IDFWord_t
{
uint64_t m_uWordID;
DWORD m_iDocs;
};
#pragma pack(pop)
STATIC_SIZE_ASSERT ( IDFWord_t, 12 );
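// Why pack(4): default alignment of the uint64_t member would pad the struct to
// 16 bytes; packing keeps entries at 12 bytes, so an .idf file is exactly one
// 8-byte header plus N*12 bytes of entries, which MergeIDF() below relies on
// when computing dSize.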
static bool BuildIDF ( const CSphString & sFilename, const StrVec_t & dFiles, CSphString & sError, bool bSkipUnique )
{
// text dictionaries are ordered alphabetically - we can use that fact while reading
// to merge duplicates, calculate total number of occurrences and process bSkipUnique
// this method is about 3x faster and consumes ~2x less memory than a hash based one
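// input lines look like "<keyword>,<docs>,..." (e.g. "cat,10,42" contributes 10 docs),
// plus an optional "total-documents: N" header line; since every dump is sorted by
// keyword, a k-way merge over the smallest current keyword folds duplicates on the fly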
typedef char StringBuffer_t [ 3*SPH_MAX_WORD_LEN+16+128 ]; // { dict-keyword, 32bit number, 32bit number, 64bit number }
int64_t iTotalDocuments = 0;
int64_t iTotalWords = 0;
int64_t iReadWords = 0;
int64_t iMergedWords = 0;
int64_t iSkippedWords = 0;
int64_t iReadBytes = 0;
int64_t iTotalBytes = 0;
const int64_t tmStart = sphMicroTimer ();
int iFiles = dFiles.GetLength ();
CSphVector<CSphAutoreader> dReaders ( iFiles );
ARRAY_FOREACH ( i, dFiles )
{
if ( !dReaders[i].Open ( dFiles[i], sError ) )
return false;
iTotalBytes += dReaders[i].GetFilesize ();
}
// internal state
CSphFixedVector<StringBuffer_t> dWords ( iFiles );
CSphVector<int> dDocs ( iFiles );
CSphVector<bool> dFinished ( iFiles );
dFinished.Fill ( false );
bool bPreread = false;
// current entry
StringBuffer_t sWord = {0};
DWORD iDocs = 0;
// output vector, preallocate 10M
CSphTightVector<IDFWord_t> dEntries;
dEntries.Reserve ( 1024*1024*10 );
for ( int i=0;; )
{
// read next input
while (true)
{
int iLen;
char * sBuffer = dWords[i];
if ( ( iLen = dReaders[i].GetLine ( sBuffer, sizeof(StringBuffer_t) ) )>=0 )
{
iReadBytes += iLen;
// find keyword pattern ( ^<keyword>,<docs>,... )
char * p1 = strchr ( sBuffer, ',' );
if ( p1 )
{
char * p2 = strchr ( p1+1, ',' );
if ( p2 )
{
*p1 = *p2 = '\0';
int iDocuments = atoi ( p1+1 );
if ( iDocuments )
{
dDocs[i] = iDocuments;
iReadWords++;
break;
}
}
} else
{
// keyword pattern not found (rather rare case), try to parse as a header, then
char sSearch[] = "total-documents: ";
if ( strstr ( sBuffer, sSearch )==sBuffer )
iTotalDocuments += atoi ( sBuffer+strlen(sSearch) );
}
} else
{
dFinished[i] = true;
break;
}
}
bool bEnd = !dFinished.Contains ( false );
i++;
if ( !bPreread && i==iFiles )
bPreread = true;
if ( bPreread )
{
// find the next smallest input
i = 0;
for ( int j=0; j<iFiles; j++ )
if ( !dFinished[j] && ( dFinished[i] || strcmp ( dWords[i], dWords[j] )>0 ) )
i = j;
// merge if we got the same word
if ( !strcmp ( sWord, dWords[i] ) && !bEnd )
{
iDocs += dDocs[i];
iMergedWords++;
} else
{
if ( sWord[0]!='\0' )
{
if ( !bSkipUnique || iDocs>1 )
{
IDFWord_t & tEntry = dEntries.Add ();
tEntry.m_uWordID = sphFNV64 ( sWord );
tEntry.m_iDocs = iDocs;
iTotalWords++;
} else
iSkippedWords++;
}
strncpy ( sWord, dWords[i], sizeof ( dWords[i] ) - 1 );
iDocs = dDocs[i];
}
}
if ( ( iReadWords & 0xffff )==0 || bEnd )
fprintf ( stderr, "read %.1f of %.1f MB, %.1f%% done%c", ( bEnd ? float(iTotalBytes) : float(iReadBytes) )/1000000.0f,
float(iTotalBytes)/1000000.0f, bEnd ? 100.0f : float(iReadBytes)*100.0f/float(iTotalBytes), bEnd ? '\n' : '\r' );
if ( bEnd )
break;
}
fprintf ( stdout, INT64_FMT" documents, " INT64_FMT " words (" INT64_FMT " read, " INT64_FMT " merged, " INT64_FMT " skipped)\n",
iTotalDocuments, iTotalWords, iReadWords, iMergedWords, iSkippedWords );
// write to disk
fprintf ( stdout, "writing %s (%1.fM)...\n", sFilename.cstr(), float(iTotalWords*sizeof(IDFWord_t))/1000000.0f );
dEntries.Sort ( bind ( &IDFWord_t::m_uWordID ) );
CSphWriter tWriter;
if ( !tWriter.OpenFile ( sFilename, sError ) )
return false;
// write file header
tWriter.PutOffset ( iTotalDocuments );
// write data
tWriter.PutBytes ( dEntries.Begin(), dEntries.GetLength()*sizeof(IDFWord_t) );
tWriter.CloseFile();
int tmWallMsec = (int)( ( sphMicroTimer() - tmStart )/1000 );
fprintf ( stdout, "finished in %d.%d sec\n", tmWallMsec/1000, (tmWallMsec/100)%10 );
return true;
}
static bool MergeIDF ( const CSphString & sFilename, const StrVec_t & dFiles, CSphString & sError, bool bSkipUnique )
{
// binary dictionaries are ordered by 64-bit word id, we can use that for merging.
// read every file, check repeating word ids, merge if found, write to disk if not
// memory requirements are about ~4KB per input file (used for buffered reading)
int64_t iTotalDocuments = 0;
int64_t iTotalWords = 0;
int64_t iReadWords = 0;
int64_t iMergedWords = 0;
int64_t iSkippedWords = 0;
int64_t iReadBytes = 0;
int64_t iTotalBytes = 0;
const int64_t tmStart = sphMicroTimer ();
int iFiles = dFiles.GetLength ();
// internal state
CSphVector<CSphAutoreader> dReaders ( iFiles );
CSphVector<IDFWord_t> dWords ( iFiles );
CSphVector<int64_t> dRead ( iFiles );
CSphVector<int64_t> dSize ( iFiles );
CSphVector<BYTE*> dBuffers ( iFiles );
CSphVector<bool> dFinished ( iFiles );
dFinished.Fill ( false );
bool bPreread = false;
// current entry
IDFWord_t tWord;
tWord.m_uWordID = 0;
tWord.m_iDocs = 0;
// preread buffer
const int iEntrySize = sizeof(int64_t)+sizeof(DWORD);
const int iBufferSize = iEntrySize*256;
// initialize vectors
ARRAY_FOREACH ( i, dFiles )
{
if ( !dReaders[i].Open ( dFiles[i], sError ) )
return false;
iTotalDocuments += dReaders[i].GetOffset ();
dRead[i] = 0;
dSize[i] = dReaders[i].GetFilesize() - sizeof( SphOffset_t );
dBuffers[i] = new BYTE [ iBufferSize ];
iTotalBytes += dSize[i];
}
// open output file
CSphWriter tWriter;
if ( !tWriter.OpenFile ( sFilename, sError ) )
return false;
// write file header
tWriter.PutOffset ( iTotalDocuments );
for ( int i=0;; )
{
if ( dRead[i]<dSize[i] )
{
iReadBytes += iEntrySize;
// This part basically does the following:
// dWords[i].m_uWordID = dReaders[i].GetOffset ();
// dWords[i].m_iDocs = dReaders[i].GetDword ();
// but reading by 12 bytes seems quite slow (SetBuffers doesn't help)
// the only way to speed it up is to buffer up a few entries manually
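// e.g. with 12-byte entries and iBufferSize = 12*256, GetBytes() touches the disk
// once per 256 entries; ( dRead[i] % iBufferSize )==0 signals the buffer is drained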
int iOffset = (int)( dRead[i] % iBufferSize );
if ( iOffset==0 )
dReaders[i].GetBytes ( dBuffers[i], ( dSize[i]-dRead[i] )<iBufferSize ? (int)( dSize[i]-dRead[i] ) : iBufferSize );
dWords[i].m_uWordID = *(uint64_t*)( dBuffers[i]+iOffset );
dWords[i].m_iDocs = *(DWORD*)( dBuffers[i]+iOffset+sizeof(uint64_t) );
dRead[i] += iEntrySize;
iReadWords++;
} else
dFinished[i] = true;
bool bEnd = !dFinished.Contains ( false );
i++;
if ( !bPreread && i==iFiles )
bPreread = true;
if ( bPreread )
{
// find the next smallest input
i = 0;
for ( int j=0; j<iFiles; j++ )
if ( !dFinished[j] && ( dFinished[i] || dWords[i].m_uWordID>dWords[j].m_uWordID ) )
i = j;
// merge if we got the same word
if ( tWord.m_uWordID==dWords[i].m_uWordID && !bEnd )
{
tWord.m_iDocs += dWords[i].m_iDocs;
iMergedWords++;
} else
{
if ( tWord.m_uWordID )
{
if ( !bSkipUnique || tWord.m_iDocs>1 )
{
tWriter.PutOffset ( tWord.m_uWordID );
tWriter.PutDword ( tWord.m_iDocs );
iTotalWords++;
} else
iSkippedWords++;
}
tWord = dWords[i];
}
}
if ( ( iReadWords & 0xffff )==0 || bEnd )
fprintf ( stderr, "read %.1f of %.1f MB, %.1f%% done%c", ( bEnd ? float(iTotalBytes) : float(iReadBytes) )/1000000.0f,
float(iTotalBytes)/1000000.0f, bEnd ? 100.0f : float(iReadBytes)*100.0f/float(iTotalBytes), bEnd ? '\n' : '\r' );
if ( bEnd )
break;
}
tWriter.CloseFile ();
ARRAY_FOREACH ( i, dFiles )
SafeDeleteArray ( dBuffers[i] );
fprintf ( stdout, INT64_FMT" documents, " INT64_FMT " words (" INT64_FMT " read, " INT64_FMT " merged, " INT64_FMT " skipped)\n",
iTotalDocuments, iTotalWords, iReadWords, iMergedWords, iSkippedWords );
int tmWallMsec = (int)( ( sphMicroTimer() - tmStart )/1000 );
fprintf ( stdout, "finished in %d.%d sec\n", tmWallMsec/1000, (tmWallMsec/100)%10 );
return true;
}
//////////////////////////////////////////////////////////////////////////
static const DWORD META_HEADER_MAGIC = 0x54525053; ///< my magic 'SPRT' header
static const DWORD META_VERSION = 18;
static void InfoMetaSchemaColumn ( CSphReader & rdInfo, DWORD uVersion )
{
CSphString sName = rdInfo.GetString ();
fprintf ( stdout, "%s", sName.cstr());
fprintf ( stdout, " %s", AttrType2Str ((ESphAttr)rdInfo.GetDword ()) );
rdInfo.GetDword (); // ignore rowitem
fprintf ( stdout, " offset %u/", rdInfo.GetDword () );
fprintf ( stdout, "count %u", rdInfo.GetDword() );
fprintf ( stdout, " payload %d", rdInfo.GetByte() );
if ( uVersion>=61 )
fprintf ( stdout, " attr flags %u", rdInfo.GetDword() );
}
static void InfoMetaSchemaField ( CSphReader & rdInfo, DWORD uVersion )
{
if ( uVersion>=57 )
{
CSphString sName = rdInfo.GetString();
fprintf ( stdout, "%s", sName.cstr() );
fprintf ( stdout, " field flags %u", rdInfo.GetDword() );
fprintf ( stdout, " payload %d", rdInfo.GetByte () );
}
else
InfoMetaSchemaColumn ( rdInfo, uVersion );
}
static void InfoMetaSchema ( CSphReader &rdMeta, DWORD uVersion )
{
fprintf ( stdout, "\n ======== SCHEMA ========" );
int iNumFields = rdMeta.GetDword ();
fprintf ( stdout, "\n Fields: %d", iNumFields );
for ( int i = 0; i<iNumFields; ++i )
{
fprintf ( stdout, "\n%02d. ", i + 1 );
InfoMetaSchemaField ( rdMeta, uVersion );
}
int iNumAttrs = rdMeta.GetDword ();
fprintf ( stdout, "\n Attributes: %d", iNumAttrs );
for ( int i = 0; i<iNumAttrs; i++ )
{
fprintf ( stdout, "\n%02d. ", i + 1 );
InfoMetaSchemaColumn ( rdMeta, uVersion );
}
}
static void InfoMetaIndexSettings ( CSphReader &tReader, DWORD uVersion )
{
fprintf ( stdout, "\n ======== TABLE SETTINGS ========" );
fprintf ( stdout, "\nMinPrefixLen: %u", tReader.GetDword () );
fprintf ( stdout, "\nMinInfixLen: %u", tReader.GetDword () );
fprintf ( stdout, "\nMaxSubstringLen: %u", tReader.GetDword () );
fprintf ( stdout, "\nbHtmlStrip: %d", tReader.GetByte () );
fprintf ( stdout, "\nsHtmlIndexAttrs: %s", tReader.GetString ().cstr() );
fprintf ( stdout, "\nsHtmlRemoveElements: %s", tReader.GetString ().cstr() );
fprintf ( stdout, "\nbIndexExactWords: %d", tReader.GetByte () );
fprintf ( stdout, "\neHitless: %u", tReader.GetDword () );
fprintf ( stdout, "\neHitFormat: %u", tReader.GetDword () );
fprintf ( stdout, "\nbIndexSP: %d", tReader.GetByte () );
fprintf ( stdout, "\nsZones: %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\niBoundaryStep: %u", tReader.GetDword () );
fprintf ( stdout, "\niStopwordStep: %u", tReader.GetDword () );
fprintf ( stdout, "\niOvershortStep: %u", tReader.GetDword () );
fprintf ( stdout, "\niEmbeddedLimit: %u", tReader.GetDword () );
fprintf ( stdout, "\neBigramIndex: %d", tReader.GetByte () );
fprintf ( stdout, "\nsBigramWords: %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\nbIndexFieldLens: %d", tReader.GetByte () );
fprintf ( stdout, "\nePreprocessor: %d", tReader.GetByte () );
tReader.GetString(); // was: RLP context
fprintf ( stdout, "\nsIndexTokenFilter: %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\ntBlobUpdateSpace: " INT64_FMT, tReader.GetOffset () );
if ( uVersion>=56 )
fprintf ( stdout, "\niSkiplistBlockSize: %u", tReader.GetDword () );
if ( uVersion>=60 )
fprintf ( stdout, "\nsHitlessFiles: %s", tReader.GetString ().cstr () );
}
static void InfoMetaFileInfo ( CSphReader &tReader )
{
fprintf ( stdout, "\n ======== FILE INFO ========" );
fprintf ( stdout, "\nuSize: " INT64_FMT, tReader.GetOffset () );
fprintf ( stdout, "\nuCTime: " INT64_FMT, tReader.GetOffset () );
fprintf ( stdout, "\nuMTime: " INT64_FMT, tReader.GetOffset () );
fprintf ( stdout, "\nuCRC32: %u", tReader.GetDword () );
}
static bool InfoMetaTokenizerSettings ( CSphReader &tReader, DWORD uVersion)
{
fprintf ( stdout, "\n ======== TOKENIZER SETTINGS ========" );
int m_iType = tReader.GetByte ();
fprintf ( stdout, "\niType: %d", m_iType );
if ( m_iType!=TOKENIZER_UTF8 && m_iType!=TOKENIZER_NGRAM )
{
fprintf (stdout, "\ncan't load an old table with SBCS tokenizer" );
return false;
}
fprintf ( stdout, "\nsCaseFolding: %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\niMinWordLen: %u", tReader.GetDword() );
bool bsyn = !!tReader.GetByte ();
fprintf ( stdout, "\nbEmbeddedSynonyms: %d", bsyn );
if ( bsyn )
{
int nSynonyms = ( int ) tReader.GetDword ();
fprintf ( stdout, "\nnSynonyms: %d", nSynonyms );
for ( int i=0; i<nSynonyms; ++i)
fprintf ( stdout, "\nEmbedded Syn(%d): %s", i, tReader.GetString ().cstr () );
}
fprintf ( stdout, "\nsSynonymsFile: %s", tReader.GetString ().cstr () );
InfoMetaFileInfo ( tReader );
fprintf ( stdout, "\nsBoundary: %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\nsIgnoreChars : %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\niNgramLen : %u", tReader.GetDword ());
fprintf ( stdout, "\nsNgramChars : %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\nsBlendChars : %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\nsBlendMode : %s", tReader.GetString ().cstr () );
return true;
}
static void InfoMetaDictionarySettings ( CSphReader & tReader )
{
fprintf ( stdout, "\n ======== DICTIONARY SETTINGS ========" );
fprintf ( stdout, "\nsMorphology : %s", tReader.GetString ().cstr () );
fprintf ( stdout, "\nsMorphFields : %s", tReader.GetString ().cstr () );
bool bEmbeddedStopwords = !!tReader.GetByte ();
fprintf ( stdout, "\nbEmbeddedStopwords : %d", bEmbeddedStopwords );
if ( bEmbeddedStopwords )
{
int nStopwords = ( int ) tReader.GetDword ();
fprintf ( stdout, "\nnStopwords : %d", nStopwords );
for ( int i = 0; i<nStopwords; ++i )
fprintf ( stdout, "\nEmbedded Stp(%d): " INT64_FMT, i, tReader.UnzipOffset () );
}
fprintf ( stdout, "\nsStopwords : %s", tReader.GetString ().cstr () );
int nFiles = tReader.GetDword ();
fprintf ( stdout, "\nnFiles : %d", nFiles );
for ( int i = 0; i<nFiles; ++i )
{
fprintf ( stdout, "\nFile %d: %s", i+1, tReader.GetString ().cstr () );
InfoMetaFileInfo ( tReader );
}
bool bEmbeddedWordforms = !!tReader.GetByte ();
fprintf ( stdout, "\nbEmbeddedWordforms : %d", bEmbeddedWordforms );
if ( bEmbeddedWordforms )
{
int nWordforms = ( int ) tReader.GetDword ();
fprintf ( stdout, "\nnWordforms : %d", nWordforms );
for ( int i = 0; i<nWordforms; ++i )
fprintf ( stdout, "\nEmbedded Wrd(%d): %s", i, tReader.GetString ().cstr() );
}
int iWrdForms = tReader.GetDword ();
fprintf ( stdout, "\niWordForms : %d", iWrdForms );
for ( int i = 0; i<iWrdForms; ++i )
{
fprintf ( stdout, "\nFile %d: %s", i + 1, tReader.GetString ().cstr () );
InfoMetaFileInfo ( tReader );
}
fprintf ( stdout, "\niMinStemmingLen : %u", tReader.GetDword () );
fprintf ( stdout, "\nbWordDict : %d", tReader.GetByte () );
fprintf ( stdout, "\nbStopwordsUnstemmed : %d", tReader.GetByte () );
fprintf ( stdout, "\nsMorphFingerprint : %s", tReader.GetString ().cstr() );
}
static void InfoMetaFieldFilterSettings ( CSphReader & tReader )
{
fprintf ( stdout, "\n ======== FIELD FILTER SETTINGS ========" );
int nRegexps = tReader.GetDword ();
fprintf (stdout, "\n %d filters", nRegexps);
if ( !nRegexps )
return;
for (int i=0; i<nRegexps; ++i)
fprintf (stdout, "\n Filter(%d) = %s", i, tReader.GetString ().cstr());
}
static void InfoMeta ( const CSphString & sMeta )
{
fprintf ( stdout, "\nDescribing meta %s", sMeta.cstr());
CSphString sError;
CSphAutoreader rdMeta;
if ( !rdMeta.Open ( sMeta, sError ) )
{
fprintf (stdout, "\n unable to open file: %s", sError.cstr());
return;
}
DWORD dwFoo = rdMeta.GetDword();
fprintf ( stdout, "\nMagick: 0x%x (expected 0x%x)", (uint32_t)dwFoo, (uint32_t)META_HEADER_MAGIC );
if ( dwFoo!=META_HEADER_MAGIC )
{
fprintf ( stdout, "\nwrong magick!");
return;
}
DWORD uVersion = rdMeta.GetDword ();
fprintf ( stdout, "\nVersion: %u (expected 1 to %u)", uVersion, META_VERSION );
if ( uVersion==0 || uVersion>META_VERSION )
{
fprintf ( stdout, "%s is v.%u, binary is v.%u", sMeta.cstr (), uVersion, META_VERSION );
return;
}
fprintf ( stdout, "\nTotal documents: %u", rdMeta.GetDword());
fprintf ( stdout, "\nTotal bytes: " INT64_FMT, rdMeta.GetOffset () );
fprintf ( stdout, "\nTID: " INT64_FMT, rdMeta.GetOffset () );
DWORD uSettingsVer = rdMeta.GetDword ();
fprintf (stdout, "\n Settings ver: %u", uSettingsVer );
InfoMetaSchema(rdMeta, uSettingsVer);
InfoMetaIndexSettings(rdMeta, uSettingsVer);
InfoMetaTokenizerSettings (rdMeta, uSettingsVer);
InfoMetaDictionarySettings(rdMeta);
fprintf ( stdout, "\niWordsCheckpoint: %u", rdMeta.GetDword () );
fprintf ( stdout, "\niMaxCodepointLength: %u", rdMeta.GetDword () );
fprintf ( stdout, "\niBloomKeyLen: %d", rdMeta.GetByte () );
fprintf ( stdout, "\niBloomHashesCount: %d", rdMeta.GetByte () );
InfoMetaFieldFilterSettings ( rdMeta );
CSphFixedVector<int> dChunkNames ( 0 );
int iLen = ( int ) rdMeta.GetDword ();
fprintf ( stdout, "\nNum of Chunknames: %d", iLen );
dChunkNames.Reset ( iLen );
rdMeta.GetBytes ( dChunkNames.Begin (), iLen * sizeof ( int ) );
for ( int nm : dChunkNames)
fprintf (stdout, "\n %d", nm);
if ( uVersion>=17 )
fprintf ( stdout, "\nSoft RAM limit: " INT64_FMT, rdMeta.GetOffset () );
}
struct IndexInfo_t
{
bool m_bEnabled {false};
DWORD m_nDocs {0};
CSphString m_sName;
CSphString m_sPath;
CSphFixedVector<DocID_t> m_dKilllist;
KillListTargets_c m_tTargets;
CSphMappedBuffer<BYTE> m_tLookup;
DeadRowMap_Disk_c m_tDeadRowMap;
IndexInfo_t()
: m_dKilllist ( 0 )
{}
};
static void ApplyKilllist ( IndexInfo_t & tTarget, const IndexInfo_t & tKiller, const KillListTarget_t & tSettings )
{
if ( tSettings.m_uFlags & KillListTarget_t::USE_DOCIDS )
{
LookupReaderIterator_c tTargetReader ( tTarget.m_tLookup.GetReadPtr() );
LookupReaderIterator_c tKillerReader ( tKiller.m_tLookup.GetReadPtr() );
KillByLookup ( tTargetReader, tKillerReader, tTarget.m_tDeadRowMap );
}
if ( tSettings.m_uFlags & KillListTarget_t::USE_KLIST )
{
LookupReaderIterator_c tTargetReader ( tTarget.m_tLookup.GetReadPtr() );
DocidListReader_c tKillerReader ( tKiller.m_dKilllist );
KillByLookup ( tTargetReader, tKillerReader, tTarget.m_tDeadRowMap );
}
}
static void ApplyKilllists ( CSphConfig & hConf )
{
CSphFixedVector<IndexInfo_t> dIndexes ( hConf["index"].GetLength() );
int iIndex = 0;
for ( auto& tIndex_ : hConf["index"] )
{
CSphConfigSection & hIndex = tIndex_.second;
if ( !hIndex("path") )
continue;
const CSphVariant * pType = hIndex ( "type" );
if ( pType && ( *pType=="rt" || *pType=="distributed" || *pType=="percolate" ) )
continue;
IndexInfo_t & tIndex = dIndexes[iIndex++];
tIndex.m_sName = tIndex_.first.cstr();
tIndex.m_sPath = RedirectToRealPath ( hIndex["path"].strval() );
IndexFiles_c tIndexFiles ( tIndex.m_sPath, tIndex.m_sName.cstr () );
if ( !tIndexFiles.CheckHeader() )
{
fprintf ( stdout, "WARNING: unable to index header for table %s\n", tIndex.m_sName.cstr() );
continue;
}
// no lookups prior to v.54
if ( tIndexFiles.GetVersion() < 54 )
{
fprintf ( stdout, "WARNING: table '%s' version: %u, min supported is 54\n", tIndex.m_sName.cstr(), tIndexFiles.GetVersion() );
continue;
}
CSphString sError;
{
auto pIndex = sphCreateIndexPhrase ( "", tIndex.m_sPath );
if ( !pIndex->LoadKillList ( &tIndex.m_dKilllist, tIndex.m_tTargets, sError ) )
{
fprintf ( stdout, "WARNING: unable to load kill-list for table %s: %s\n", tIndex.m_sName.cstr(), sError.cstr() );
continue;
}
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( false, nullptr, dWarnings ) )
{
fprintf ( stdout, "WARNING: unable to prealloc table %s: %s\n", tIndex.m_sName.cstr(), sError.cstr() );
continue;
}
for ( const auto & i : dWarnings )
fprintf ( stdout, "WARNING: table %s: %s\n", tIndex.m_sName.cstr(), i.cstr() );
tIndex.m_nDocs = (DWORD)pIndex->GetStats().m_iTotalDocuments;
}
CSphString sLookup;
sLookup.SetSprintf ( "%s.spt", tIndex.m_sPath.cstr() );
if ( !tIndex.m_tLookup.Setup ( sLookup.cstr(), sError, false ) )
{
fprintf ( stdout, "WARNING: unable to load lookup for table %s: %s\n", tIndex.m_sName.cstr(), sError.cstr() );
continue;
}
CSphString sDeadRowMap;
sDeadRowMap.SetSprintf ( "%s.spm", tIndex.m_sPath.cstr() );
if ( !tIndex.m_tDeadRowMap.Prealloc ( tIndex.m_nDocs, sDeadRowMap, sError ) )
{
fprintf ( stdout, "WARNING: unable to load dead row map for table %s: %s\n", tIndex.m_sName.cstr(), sError.cstr() );
continue;
}
tIndex.m_bEnabled = true;
}
for ( const auto & tIndex : dIndexes )
{
if ( !tIndex.m_bEnabled || !tIndex.m_tTargets.m_dTargets.GetLength() )
continue;
fprintf ( stdout, "applying kill-list of table %s\n", tIndex.m_sName.cstr() );
for ( const auto & tTarget : tIndex.m_tTargets.m_dTargets )
{
if ( tTarget.m_sIndex==tIndex.m_sName )
{
fprintf ( stdout, "WARNING: table '%s': appying killlist to itself\n", tIndex.m_sName.cstr() );
continue;
}
for ( auto & tTargetIndex : dIndexes )
{
if ( !tTargetIndex.m_bEnabled || tTarget.m_sIndex!=tTargetIndex.m_sName )
continue;
ApplyKilllist ( tTargetIndex, tIndex, tTarget );
}
}
}
for ( auto & tIndex : dIndexes )
{
if ( !tIndex.m_bEnabled )
continue;
// flush maps
CSphString sError;
if ( !tIndex.m_tDeadRowMap.Flush ( true, sError ) )
fprintf ( stdout, "WARNING: table '%s': unable to flush dead row map: %s\n", tIndex.m_sName.cstr(), sError.cstr() );
// delete killlists
CSphString sKilllist;
sKilllist.SetSprintf ( "%s.spk", tIndex.m_sPath.cstr() );
if ( !sphIsReadable(sKilllist) )
continue;
::unlink ( sKilllist.cstr() );
}
}
static void ShowVersion()
{
const char * szColumnarVer = GetColumnarVersionStr();
CSphString sColumnar = "";
if ( szColumnarVer )
sColumnar.SetSprintf ( " (columnar %s)", szColumnarVer );
const char * sSiVer = GetSecondaryVersionStr();
CSphString sSi = "";
if ( sSiVer )
sSi.SetSprintf ( " (secondary %s)", sSiVer );
fprintf ( stdout, "%s%s%s%s", szMANTICORE_NAME, sColumnar.cstr(), sSi.cstr(), szMANTICORE_BANNER_TEXT );
}
static void ShowHelp ()
{
/// notice: set tab width=8 to correctly see content of the message
fprintf ( stdout,
R"(Usage: indextool <COMMAND> [OPTIONS]
Commands are:
-h, --help display this help message
-v display version information
--check <TABLE> perform table consistency check
--check-disk-chunk <CHUNK_ID> perform single disk chunk consistency check (to be used together with --check)
--check-id-dups <CHUNK_ID> check if there are duplicate ids (to be used together with --check)
--rotate rotate table after --check in case it's valid
--checkconfig perform config consistency check
--dumpconfig <SPH-FILE> dump table header in config format by file name
--dumpdocids <TABLE> dump docids by table name
--dumpdict <SPI-FILE> dump dictionary by file name
--dumpdict <TABLE> dump dictionary
--dumpheader <SPH-FILE>|<META-FILE>
dump table header, or rt table meta by file name
--dumpheader <TABLE> dump table header by table name
--dumphitlist <TABLE> <KEYWORD>
--dumphitlist <TABLE> --wordid <ID>
dump hits for a given keyword
--docextract TBL DOCID runs usual table check pass of whole dictionary/docs/hits,
and collects all the words and hits belonging to requested document.
Then all of the words are placed in the order according to their fields
and positions, and result is printed, grouping by field.
--fold <TABLE> [FILE] fold FILE or stdin using TABLE charset_table
--htmlstrip <TABLE> filter stdin using table HTML stripper settings
--buildidf <TABLE1.dict> [TABLE2.dict ...] [--skip-uniq] --out <GLOBAL.idf>
join --stats dictionary dumps into global.idf file
--mergeidf <NODE1.idf> [NODE2.idf ...] [--skip-uniq] --out <GLOBAL.idf>
merge several .idf files into one file
--apply-killlists apply table killlists
Options are:
-c, --config <file> use given config file instead of defaults
-q, --quiet be quiet, skip banner etc (useful with --fold etc)
--strip-path strip path from filenames referenced by table
(eg. stopwords, exceptions, etc)
--stats show total statistics in the dictionary dump
--skip-uniq skip unique (df=1) words in the .idf files
)"
);
}
enum class IndextoolCmd_e
{
NOTHING,
DUMPHEADER,
DUMPCONFIG,
DUMPDOCIDS,
DUMPHITLIST,
DUMPDICT,
CHECK,
EXTRACT,
STRIP,
MORPH,
BUILDIDF,
MERGEIDF,
CHECKCONFIG,
FOLD,
APPLYKLISTS
};
static IndextoolCmd_e g_eCommand = IndextoolCmd_e::NOTHING;
static const char * g_sCommands[] = {"", "dumpheader", "dumpconfig", "dumpdocids", "dumphitlist", "dumpdict",
"check", "docextract", "htmlstrip", "morph", "buildidf", "mergeidf", "checkconfig", "fold", "apply-killlists" }; // must stay parallel to IndextoolCmd_e
static void SetCmd ( IndextoolCmd_e eCmd )
{
if ( g_eCommand!=IndextoolCmd_e::NOTHING )
{
fprintf ( stdout, "ERROR: multiple commands not supported (%s, %s).\n", g_sCommands[(int)g_eCommand], g_sCommands[(int)eCmd] );
exit ( 1 );
}
g_eCommand = eCmd;
}
// this must be more or less in sync with our daemon index loading code
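// illustrative shape of the parsed JSON (field names taken from the parser below;
// real files may carry more members, which are simply ignored here):
// { "indexes": { "idx1": { "type": "rt", "path": "idx1/idx1" } } }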
static bool ReadJsonConfig ( const CSphString & sConfigPath, CSphConfig & hConf, CSphString & sError )
{
if ( !sphIsReadable ( sConfigPath, nullptr ) )
return true;
CSphAutoreader tConfigFile;
if ( !tConfigFile.Open ( sConfigPath, sError ) )
return false;
int iSize = (int)tConfigFile.GetFilesize();
if ( !iSize )
return true;
CSphFixedVector<BYTE> dData ( iSize+1 );
tConfigFile.GetBytes ( dData.Begin(), iSize );
if ( tConfigFile.GetErrorFlag() )
{
sError = tConfigFile.GetErrorMessage();
return false;
}
dData[iSize] = 0; // safe gap
JsonObj_c tRoot ( (const char*)dData.Begin() );
if ( tRoot.GetError ( (const char *)dData.Begin(), dData.GetLength(), sError ) )
return false;
// check indexes
JsonObj_c tIndexes = tRoot.GetItem("indexes");
for ( const auto & i : tIndexes )
{
const char * szSection = "index";
if ( !hConf.Exists ( szSection ) )
hConf.Add ( CSphConfigType(), szSection );
const CSphString & sIndexName = i.Name();
if ( hConf[szSection].Exists ( sIndexName ) )
sphDie ( "table '%s' already exists", sIndexName.cstr() );
CSphString sType;
if ( !i.FetchStrItem ( sType, "type", sError ) )
return false;
if ( sType == "distributed" || sType == "template" ) // 'template' is not implemented yet, however obvious
continue;
CSphString sPath;
if ( !i.FetchStrItem ( sPath, "path", sError ) )
return false;
MakeRelativePath ( sPath );
hConf[szSection].Add ( CSphConfigSection(), sIndexName );
CSphConfigSection& tSec = hConf[szSection][sIndexName];
tSec.AddEntry ( "path", sPath.cstr() );
tSec.AddEntry ( "type", sType.cstr() );
tSec.AddEntry ( "from_json", "1" );
}
return true;
}
static bool LoadJsonConfig ( CSphConfig & hConf, const CSphString & sConfigFile )
{
if ( !hConf.Exists("searchd") )
return false;
const CSphConfigSection & hSearchd = hConf["searchd"]["searchd"];
if ( !hSearchd.Exists("data_dir") )
return false;
g_sDataDir = hSearchd["data_dir"].strval();
if ( hConf.Exists("index") || hConf.Exists("source") || !hConf.Exists("searchd") )
{
sphDie ( "'data_dir' cannot be mixed with table declarations in '%s'", sConfigFile.cstr() );
return false;
}
CSphString sError;
if ( !CheckPath ( g_sDataDir, true, sError ) )
{
sphDie ( "data_dir unusable: %s", sError.cstr() );
return false;
}
g_bConfigless = true;
CSphString sConfigPath;
sConfigPath.SetSprintf ( "%s/manticore.json", g_sDataDir.cstr() );
if ( !ReadJsonConfig ( sConfigPath, hConf, sError ) )
sphDie ( "failed to use JSON config %s: %s", sConfigPath.cstr(), sError.cstr() );
return true;
}
static std::unique_ptr<CSphIndex> CreateIndex ( CSphConfig & hConf, CSphString sIndex, bool bDictKeywords, bool bRotate, StrVec_t * pWarnings, CSphString & sError )
{
// don't expect complete index declarations from indexes created with CREATE TABLE
const auto& hIndex = hConf["index"][sIndex];
bool bFromJson = !!hIndex("from_json");
if ( hIndex("type") && hIndex["type"]=="rt" )
{
CSphSchema tSchema;
CSphIndexSettings tSettings;
if ( bFromJson || sphRTSchemaConfigure ( hIndex, tSchema, tSettings, pWarnings, sError, false, false ) )
return sphCreateIndexRT ( std::move ( sIndex ), hIndex["path"].strval(), std::move ( tSchema ), 32*1024*1024, bDictKeywords );
} else
{
StringBuilder_c tPath;
tPath << RedirectToRealPath ( hIndex["path"].strval() ) << ( bRotate ? ".tmp" : nullptr );
return sphCreateIndexPhrase ( std::move ( sIndex ), (CSphString)tPath );
}
return nullptr;
}
static void PreallocIndex ( const char * szIndex, bool bStripPath, CSphIndex * pIndex )
{
SetIndexFilenameBuilder ( CreateFilenameBuilder );
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = CreateFilenameBuilder ( szIndex );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( bStripPath, pFilenameBuilder.get(), dWarnings ) )
sphDie ( "table '%s': prealloc failed: %s\n", szIndex, pIndex->GetLastError().cstr() );
for ( const auto & i : dWarnings )
fprintf ( stdout, "WARNING: table %s: %s\n", szIndex, i.cstr() );
}
static void Init()
{
// threads should be initialized before memory allocations
char cTopOfMainStack;
Threads::Init();
Threads::PrepareMainThread ( &cTopOfMainStack );
auto iThreads = GetNumLogicalCPUs();
// iThreads = 1; // uncomment if you want to run all coro tests in a single thread
SetMaxChildrenThreads ( iThreads );
StartGlobalWorkPool();
WipeGlobalSchedulerOnShutdownAndFork();
}
int main ( int argc, char ** argv )
{
Init();
AT_SCOPE_EXIT ( []() { StopGlobalWorkPool(); });
CSphString sError, sErrorSI, sErrorKNN;
bool bColumnarError = !InitColumnar ( sError );
bool bSecondaryError = !InitSecondary ( sErrorSI );
bool bKNNError = !InitKNN ( sErrorKNN );
if ( bColumnarError )
fprintf ( stdout, "Error initializing columnar storage: %s", sError.cstr() );
if ( bSecondaryError )
fprintf ( stdout, "Error initializing secondary index: %s", sErrorSI.cstr() );
if ( bKNNError )
fprintf ( stdout, "Error initializing knn index: %s", sErrorKNN.cstr() );
if ( argc<=1 )
{
ShowVersion();
ShowHelp();
exit ( 0 );
}
//////////////////////
// parse command line
//////////////////////
#define OPT(_a1,_a2) else if ( !strcmp(argv[i],_a1) || !strcmp(argv[i],_a2) )
#define OPT1(_a1) else if ( !strcmp(argv[i],_a1) )
const char * sOptConfig = NULL;
CSphString sDumpHeader, sIndex, sKeyword, sFoldFile;
bool bWordid = false;
bool bStripPath = false;
StrVec_t dFiles;
CSphString sOut;
bool bStats = false;
bool bSkipUnique = false;
CSphString sDumpDict;
bool bTraceToStdout = true;
bool bRotate = false;
bool bCheckIdDups = false;
int iCheckChunk = -1;
DocID_t iExtractDocid = -1;
int i;
for ( i=1; i<argc; i++ )
{
// handle argless options
if ( argv[i][0]!='-' ) break;
OPT ( "-q", "--quiet" ) { bTraceToStdout = false; continue; }
OPT1 ( "--strip-path" ) { bStripPath = true; continue; }
OPT1 ( "--checkconfig" ) { SetCmd ( IndextoolCmd_e::CHECKCONFIG ); continue; }
OPT1 ( "--rotate" ) { bRotate = true; continue; }
OPT1 ( "-v" ) { ShowVersion(); exit(0); }
OPT ( "-h", "--help" ) { ShowVersion(); ShowHelp(); exit(0); }
OPT1 ( "--apply-killlists" ){ SetCmd ( IndextoolCmd_e::APPLYKLISTS ); continue; }
OPT1 ( "--check-id-dups" ) { bCheckIdDups = true; continue; }
// handle options/commands with 1+ args
if ( (i+1)>=argc ) break;
OPT ( "-c", "--config" ) { sOptConfig = argv[++i]; continue; }
OPT1 ( "--dumpheader" ) { SetCmd ( IndextoolCmd_e::DUMPHEADER ); sDumpHeader = argv[++i]; }
OPT1 ( "--dumpconfig" ) { SetCmd ( IndextoolCmd_e::DUMPCONFIG ); sDumpHeader = argv[++i]; }
OPT1 ( "--dumpdocids" ) { SetCmd ( IndextoolCmd_e::DUMPDOCIDS ); sIndex = argv[++i]; }
OPT1 ( "--check" ) { SetCmd ( IndextoolCmd_e::CHECK ); sIndex = argv[++i]; }
OPT1 ( "--htmlstrip" ) { SetCmd ( IndextoolCmd_e::STRIP ); sIndex = argv[++i]; }
OPT1 ( "--morph" ) { SetCmd ( IndextoolCmd_e::MORPH ); sIndex = argv[++i]; }
OPT1 ( "--dumpdict" )
{
SetCmd ( IndextoolCmd_e::DUMPDICT );
sDumpDict = argv[++i];
if ( (i+1)<argc && !strcmp ( argv[i+1], "--stats" ) )
{
bStats = true;
i++;
}
}
OPT1 ( "--fold" )
{
SetCmd ( IndextoolCmd_e::FOLD );
sIndex = argv[++i];
if ( (i+1)<argc && argv[i+1][0]!='-' )
sFoldFile = argv[++i];
}
OPT1 ( "--check-disk-chunk" )
{
iCheckChunk = (int)strtoll ( argv[++i], NULL, 10 ); continue;
}
// options with 2 args
else if ( ( i + 2 ) >= argc ) // NOLINT
{
// not enough args
break;
}
OPT1 ( "--docextract" ) { SetCmd ( IndextoolCmd_e::EXTRACT ); sIndex = argv[++i]; iExtractDocid = strtoll ( argv[++i], NULL, 10 ); }
OPT1 ("--dumphitlist" )
{
SetCmd ( IndextoolCmd_e::DUMPHITLIST );
sIndex = argv[++i];
if ( !strcmp ( argv[i+1], "--wordid" ) )
{
if ( (i+3)>=argc )
break; // not enough args
bWordid = true;
i++;
}
sKeyword = argv[++i];
} else if ( !strcmp ( argv[i], "--buildidf" ) || !strcmp ( argv[i], "--mergeidf" ) )
{
SetCmd ( !strcmp ( argv[i], "--buildidf" ) ? IndextoolCmd_e::BUILDIDF : IndextoolCmd_e::MERGEIDF );
while ( ++i<argc )
{
if ( !strcmp ( argv[i], "--out" ) )
{
if ( (i+1)>=argc )
break; // too few args
sOut = argv[++i];
} else if ( !strcmp ( argv[i], "--skip-uniq" ) )
{
bSkipUnique = true;
} else if ( argv[i][0]=='-' )
{
break; // unknown switch
} else
{
dFiles.Add ( argv[i] ); // handle everything else as a file name
}
}
break;
} else
{
// unknown option
break;
}
}
if ( bTraceToStdout )
ShowVersion();
if ( i!=argc )
{
fprintf ( stdout, "ERROR: malformed or unknown option near '%s'.\n", argv[i] );
return 1;
}
//////////////////////
// load proper config
//////////////////////
if ( !sphInitCharsetAliasTable ( sError ) )
sphDie ( "failed to init charset alias table: %s", sError.cstr() );
sphCollationInit ();
SetupLemmatizerBase();
auto hConf = sphLoadConfigWithoutIndexes ( sOptConfig, bTraceToStdout );
// can't reuse the code from searchdconfig, using a simplified version here
LoadJsonConfig ( hConf, sOptConfig );
// no indexes in both .json and .conf?
if ( !hConf ( "index" ) )
sphDie ( "no tables found in config file '%s'", sOptConfig );
while (true)
{
if ( g_eCommand==IndextoolCmd_e::DUMPHEADER && sDumpHeader.Ends ( ".meta" ) )
{
InfoMeta ( sDumpHeader );
return 0;
}
if ( g_eCommand==IndextoolCmd_e::DUMPDICT && !sDumpDict.Ends ( ".spi" ) )
sIndex = sDumpDict;
break;
}
///////////
// action!
///////////
if ( g_eCommand==IndextoolCmd_e::CHECKCONFIG )
{
fprintf ( stdout, "config valid\nchecking table(s) ... " );
bool bError = false;
// config parser made sure that index(es) present
for ( const auto& tIndex : hConf["index"] )
{
const CSphConfigSection & hIndex = tIndex.second;
const CSphVariant * pPath = hIndex ( "path" );
if ( !pPath )
continue;
const CSphVariant * pType = hIndex ( "type" );
if ( pType && ( *pType=="rt" || *pType=="distributed" || *pType=="percolate" ) )
continue;
// checking index presence by sph file available
CSphString sHeader;
sHeader.SetSprintf ( "%s.sph", pPath->cstr() );
CSphAutoreader rdHeader;
if ( !rdHeader.Open ( sHeader, sError ) )
{
// nice looking output
if ( !bError )
fprintf ( stdout, "\nmissed table(s): '%s'", tIndex.first.cstr() );
else
fprintf ( stdout, ", '%s'", tIndex.first.cstr() );
bError = true;
}
}
if ( !bError )
{
fprintf ( stdout, "ok\n" );
exit ( 0 );
} else
{
fprintf ( stdout, "\n" );
exit ( 1 );
}
}
if ( g_eCommand==IndextoolCmd_e::APPLYKLISTS )
{
ApplyKilllists ( hConf );
exit (0);
}
// configure common settings (as of time of this writing, AOT and ICU setup)
sphConfigureCommon ( hConf );
// common part for several commands, check and preload index
std::unique_ptr<CSphIndex> pIndex;
while ( !sIndex.IsEmpty() )
{
// check config
if ( !hConf["index"].Exists(sIndex) )
sphDie ( "table '%s': no such table in config\n", sIndex.cstr() );
// only need config-level settings for --htmlstrip
if ( g_eCommand==IndextoolCmd_e::STRIP )
break;
CSphVariant * pType = hConf["index"][sIndex]("type");
if ( pType && ( *pType=="distributed" || *pType=="percolate" ) )
sphDie ( "table '%s': check of '%s' type is not supported'\n", sIndex.cstr(), pType->cstr() );
if ( !hConf["index"][sIndex]("path") )
sphDie ( "table '%s': missing 'path' in config'\n", sIndex.cstr() );
// preload that index
bool bDictKeywords = true;
if ( hConf["index"][sIndex].Exists ( "dict" ) )
bDictKeywords = ( hConf["index"][sIndex]["dict"]!="crc" );
StrVec_t dWarnings;
pIndex = CreateIndex ( hConf, sIndex, bDictKeywords, bRotate, &dWarnings, sError );
for ( const auto & i : dWarnings )
fprintf ( stdout, "WARNING: table '%s': %s\n", sIndex.cstr(), i.cstr() );
if ( !pIndex )
sphDie ( "table '%s': failed to create (%s)", sIndex.cstr(), sError.cstr() );
if ( g_eCommand!=IndextoolCmd_e::DUMPDOCIDS && g_eCommand!=IndextoolCmd_e::DUMPDICT )
pIndex->SetDebugCheck ( bCheckIdDups, iCheckChunk );
Threads::CallCoroutine ( [&] {
PreallocIndex ( sIndex.cstr(), bStripPath, pIndex.get() );
if ( g_eCommand==IndextoolCmd_e::MORPH )
return;
if ( !(g_eCommand==IndextoolCmd_e::CHECK || g_eCommand==IndextoolCmd_e::EXTRACT ))
pIndex->Preread();
if ( hConf["index"][sIndex]("hitless_words") )
{
CSphIndexSettings tSettings = pIndex->GetSettings();
const CSphString & sValue = hConf["index"][sIndex]["hitless_words"].strval();
if ( sValue=="all" )
{
tSettings.m_eHitless = SPH_HITLESS_ALL;
} else
{
tSettings.m_eHitless = SPH_HITLESS_SOME;
tSettings.m_sHitlessFiles = sValue;
}
pIndex->Setup ( tSettings );
}
});
break;
}
int iCheckErrno = 0;
CSphString sNewIndex;
// do the dew
switch ( g_eCommand )
{
case IndextoolCmd_e::NOTHING:
sphDie ( "nothing to do; specify a command (run indextool w/o switches for help)" );
case IndextoolCmd_e::DUMPHEADER:
case IndextoolCmd_e::DUMPCONFIG:
{
CSphString sIndexName = "(none)";
if ( hConf("index") && hConf["index"](sDumpHeader) )
{
fprintf ( stdout, "dumping header for table '%s'...\n", sDumpHeader.cstr() );
if ( !hConf["index"][sDumpHeader]("path") )
sphDie ( "missing 'path' for table '%s'\n", sDumpHeader.cstr() );
sIndexName = sDumpHeader;
sDumpHeader.SetSprintf ( "%s.sph", RedirectToRealPath ( hConf["index"][sDumpHeader]["path"].strval() ).cstr() );
} else
fprintf ( stdout, "dumping header file '%s'...\n", sDumpHeader.cstr() );
pIndex = sphCreateIndexPhrase ( sIndexName, "" );
pIndex->DebugDumpHeader ( stdout, sDumpHeader, g_eCommand==IndextoolCmd_e::DUMPCONFIG );
break;
}
case IndextoolCmd_e::DUMPDOCIDS:
fprintf ( stdout, "dumping docids for table '%s'...\n", sIndex.cstr() );
pIndex->DebugDumpDocids ( stdout );
break;
case IndextoolCmd_e::DUMPHITLIST:
fprintf ( stdout, "dumping hitlist for table '%s' keyword '%s'...\n", sIndex.cstr(), sKeyword.cstr() );
pIndex->DebugDumpHitlist ( stdout, sKeyword.cstr(), bWordid );
break;
case IndextoolCmd_e::DUMPDICT:
{
if ( sDumpDict.Ends ( ".spi" ) )
{
fprintf ( stdout, "dumping dictionary file '%s'...\n", sDumpDict.cstr() );
sIndex = sDumpDict.SubString ( 0, sDumpDict.Length()-4 );
pIndex = sphCreateIndexPhrase ( sIndex, sIndex );
if ( !pIndex )
sphDie ( "table '%s': failed to create (%s)", sIndex.cstr(), sError.cstr() );
StrVec_t dWarnings;
if ( !pIndex->Prealloc ( bStripPath, nullptr, dWarnings ) )
sphDie ( "table '%s': prealloc failed: %s\n", sIndex.cstr(), pIndex->GetLastError().cstr() );
for ( const auto & sWarning : dWarnings )
fprintf ( stdout, "WARNING: table %s: %s\n", sIndex.cstr(), sWarning.cstr() );
pIndex->Preread();
} else
{
fprintf ( stdout, "dumping dictionary for table '%s'...\n", sIndex.cstr() );
}
if ( bStats )
fprintf ( stdout, "total-documents: " INT64_FMT "\n", pIndex->GetStats().m_iTotalDocuments );
pIndex->DebugDumpDict ( stdout, false );
break;
}
case IndextoolCmd_e::CHECK:
case IndextoolCmd_e::EXTRACT:
fprintf ( stdout, "checking table '%s'...\n", sIndex.cstr() );
{
std::unique_ptr<DebugCheckError_i> pReporter { MakeDebugCheckError ( stdout, ( g_eCommand == IndextoolCmd_e::CHECK ? nullptr : &iExtractDocid ) ) };
iCheckErrno = pIndex->DebugCheck ( *pReporter, nullptr );
}
if ( iCheckErrno )
return iCheckErrno;
if ( bRotate )
{
pIndex->Dealloc();
sNewIndex.SetSprintf ( "%s.new", RedirectToRealPath ( hConf["index"][sIndex]["path"].strval() ).cstr() );
if ( !pIndex->Rename ( sNewIndex ) )
sphDie ( "table '%s': rotate failed: %s\n", sIndex.cstr(), pIndex->GetLastError().cstr() );
}
return 0;
case IndextoolCmd_e::STRIP:
{
const CSphConfigSection & hIndex = hConf["index"][sIndex];
if ( hIndex.GetInt ( "html_strip" )==0 )
sphDie ( "HTML stripping is not enabled in table '%s'", sIndex.cstr() );
StripStdin ( hIndex.GetStr ( "html_index_attrs" ).cstr(), hIndex.GetStr ( "html_remove_elements" ).cstr() );
}
break;
case IndextoolCmd_e::MORPH:
ApplyMorphology ( pIndex.get() );
break;
case IndextoolCmd_e::BUILDIDF:
if ( !BuildIDF ( sOut, dFiles, sError, bSkipUnique ) )
sphDie ( "ERROR: %s\n", sError.cstr() );
break;
case IndextoolCmd_e::MERGEIDF:
if ( !MergeIDF ( sOut, dFiles, sError, bSkipUnique ) )
sphDie ( "ERROR: %s\n", sError.cstr() );
break;
case IndextoolCmd_e::FOLD:
{
FILE * fp = stdin;
if ( !sFoldFile.IsEmpty() )
{
fp = fopen ( sFoldFile.cstr(), "rb" );
if ( !fp )
sphDie ( "failed to topen %s\n", sFoldFile.cstr() );
}
CharsetFold ( pIndex.get(), fp );
if ( fp!=stdin )
fclose ( fp );
}
break;
default:
sphDie ( "INTERNAL ERROR: unhandled command (id=%d)", (int)g_eCommand );
}
Threads::CallCoroutine ( [&] {
pIndex = nullptr; // need to reset index prior to release of the libraries
});
ShutdownColumnar();
ShutdownSecondary();
ShutdownKNN();
return 0;
}
| 48,953 | C++ | .cpp | 1,366 | 32.756955 | 164 | 0.661561 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,880 | sphinxqcache.cpp | manticoresoftware_manticoresearch/src/sphinxqcache.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxqcache.h"
#include "exprtraits.h"
#include "mini_timer.h"
//////////////////////////////////////////////////////////////////////////
// QUERY CACHE
//////////////////////////////////////////////////////////////////////////
// TODO: maybe add optimized storage for const weight frames?
// TODO: stop accumulating once entry is bigger than max total cache size
// TODO: maybe estimate and report peak temporary RAM usage
// TODO: maybe account and report locking time
#define QCACHE_NO_ENTRY (NULL)
#define QCACHE_DEAD_ENTRY ((QcacheEntry_c*)-1)
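// two distinct sentinels let the open-addressed hash tell "never used" slots from
// deleted ones (the usual tombstone trick), so linear-probe chains running over
// deleted entries are not broken; see IsValidEntry() below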
/// query cache
class Qcache_c : public QcacheStatus_t
{
private:
CSphMutex m_tLock; ///< hash lock
CSphVector<QcacheEntry_c*> m_hData; ///< our little queries hash
int m_iMaxQueries; ///< max load
int m_iMruHead; ///< most recently used entry
public:
Qcache_c();
~Qcache_c();
void Setup ( int64_t iMaxBytes, int iThreshMsec, int iTtlSec );
void Add ( const CSphQuery & q, QcacheEntry_c * pResult, const ISphSchema & tSorterSchema );
QcacheEntry_c * Find ( int64_t iIndexId, const CSphQuery & q, const ISphSchema & tSorterSchema );
void DeleteIndex ( int64_t iIndexId ) EXCLUDES ( m_tLock );
private:
static uint64_t GetKey ( int64_t iIndexId, const CSphQuery & q );
bool IsValidEntry ( int i ) { return m_hData[i]!=QCACHE_NO_ENTRY && m_hData[i]!=QCACHE_DEAD_ENTRY; }
void EnforceLimits ( bool bSizeOnly ) EXCLUDES ( m_tLock );
void MruToHead ( int iRes );
void DeleteEntry ( int iEntry );
bool CanCacheQuery ( const CSphQuery & q ) const;
};
/// ranker that serves cached results
class QcacheRanker_c final : public ISphRanker
{
protected:
QcacheEntryRefPtr_t m_pEntry; ///< cache entry we are decoding
CSphMatch m_dMatches [ QcacheEntry_c::MAX_FRAME_SIZE ]; ///< matches buffer
BYTE * m_pCur; ///< current position in compressed data
BYTE * m_pMax; ///< max position in compressed data
RowID_t m_uLastId = INVALID_ROWID; ///< docid delta decoder state
const CSphIndex * m_pIndex;
CSphQueryContext * m_pCtx;
void ResetImpl ( const ISphQwordSetup & tSetup ); // to avoid call real virtual Reset() from c-tr.
public:
explicit QcacheRanker_c ( QcacheEntry_c * pEntry, const ISphQwordSetup & tSetup );
CSphMatch * GetMatchesBuffer() final { return m_dMatches; }
int GetMatches() final;
void Reset ( const ISphQwordSetup & tSetup ) final { ResetImpl ( tSetup ); }
bool IsCache() const final { return true; }
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const final { return { 0.0f, 0, 0 }; }
};
/// query cache instance
static Qcache_c g_Qcache;
//////////////////////////////////////////////////////////////////////////
void QcacheEntry_c::Append ( RowID_t uRowid, DWORD uWeight )
{
m_iTotalMatches++;
QcacheMatch_t & m = m_dFrame.Add();
m.m_tRowID = uRowid;
m.m_uWeight = uWeight;
if ( m_dFrame.GetLength()==MAX_FRAME_SIZE )
FlushFrame();
}
static inline int NumBytes ( RowID_t tValue )
{
int iRes = 0;
while ( tValue!=0 )
{
tValue >>= 8;
iRes++;
}
return iRes;
}
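// e.g. NumBytes(0)==0, NumBytes(255)==1, NumBytes(256)==2, NumBytes(0x1000000)==4;
// callers below clamp the result to >=1, so a zero value still occupies one byte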
void QcacheEntry_c::RankerReset()
{
FlushFrame();
// 01000000 is a delta restart marker
m_dData.Add(0x40);
m_tLastRowID = INVALID_ROWID;
}
void QcacheEntry_c::FlushFrame()
{
/////////////////////////////
// store an incomplete frame
/////////////////////////////
if ( !m_dFrame.GetLength() )
return;
if ( m_dFrame.GetLength()<MAX_FRAME_SIZE )
{
// begin with two marker bytes
// 100wwddd 00lllll
// ww = 1..4 bytes per weight
// ddd = 1..8 bytes per delta
// lllll = 1..31 {delta,weight} pairs
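// worked example: 3 pairs, deltas fitting 2 bytes, weights fitting 1 byte ->
// marker byte 0x80 + (2-1) + ((1-1)<<3) = 0x81, length byte 0x03, then 3*(2+1)=9 payload bytes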
int iDeltaBytes = 1;
int iWeightBytes = 1;
RowID_t tLastId = m_tLastRowID;
ARRAY_FOREACH ( i, m_dFrame )
{
RowID_t tDelta = m_dFrame[i].m_tRowID - tLastId - 1;
tLastId = m_dFrame[i].m_tRowID;
iDeltaBytes = Max ( iDeltaBytes, NumBytes ( tDelta ) );
iWeightBytes = Max ( iWeightBytes, NumBytes ( m_dFrame[i].m_uWeight ) );
}
assert ( iDeltaBytes>=1 && iDeltaBytes<=8 );
assert ( iWeightBytes>=1 && iWeightBytes<=4 );
m_dData.Add ( (BYTE)( 0x80 + ( iDeltaBytes-1 ) + ( ( iWeightBytes-1 )<<3 ) ) );
m_dData.Add ( (BYTE)m_dFrame.GetLength() );
BYTE * p = m_dData.AddN ( m_dFrame.GetLength()*( iDeltaBytes + iWeightBytes ) );
tLastId = m_tLastRowID;
ARRAY_FOREACH ( i, m_dFrame )
{
RowID_t uDelta = m_dFrame[i].m_tRowID - tLastId - 1;
tLastId = m_dFrame[i].m_tRowID;
memcpy ( p, &uDelta, iDeltaBytes );
p += iDeltaBytes;
memcpy ( p, &m_dFrame[i].m_uWeight, iWeightBytes );
p += iWeightBytes;
}
m_dFrame.Resize(0);
return;
}
///////////////////////////
// store a complete frame
///////////////////////////
assert ( m_dFrame.GetLength()==MAX_FRAME_SIZE );
// frame begins with a marker byte
// marker format is 00iwwddd, with bit meanings as follows
// i = weights can be either indexed or stored directly
// ww = weight indexes (when i==1) can use 1..2 bytes per index
// ww = weight values (when i==0) can use 1..4 bytes per weight
// ddd = docid deltas can use 0..7 bytes per delta
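// worked example: indexed weights (i=1), 1-byte weight indexes, 2-byte deltas ->
// marker byte (1<<5) + ((1-1)<<3) + (2-1) = 0x21, payload MAX_FRAME_SIZE*(2+1) bytes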
RowID_t uLastId = m_tLastRowID;
int iDeltaBytes = 1;
bool bIndexWeights = ( m_hWeights.GetLength() + MAX_FRAME_SIZE )<=0xffff;
int iWeightBytes = 1;
ARRAY_FOREACH ( i, m_dFrame )
{
RowID_t tDelta = m_dFrame[i].m_tRowID - uLastId - 1;
iDeltaBytes = Max ( iDeltaBytes, NumBytes ( tDelta ) );
uLastId = m_dFrame[i].m_tRowID;
if ( bIndexWeights )
m_dFrame[i].m_uWeight = m_hWeights.FindOrAdd ( m_dFrame[i].m_uWeight, (int)m_hWeights.GetLength() );
iWeightBytes = Max ( iWeightBytes, NumBytes ( m_dFrame[i].m_uWeight ) );
}
// add marker byte
assert ( iDeltaBytes>=1 && iDeltaBytes<=8 );
assert ( iWeightBytes>=1 && iWeightBytes<=4 );
m_dData.Add ( (BYTE)( ( bIndexWeights<<5 ) + ( ( iWeightBytes-1 )<<3 ) + ( iDeltaBytes-1 ) ) );
// encode data
BYTE * p = m_dData.AddN ( MAX_FRAME_SIZE*( iDeltaBytes + iWeightBytes ) );
uLastId = m_tLastRowID;
ARRAY_FOREACH ( i, m_dFrame )
{
RowID_t tDelta = m_dFrame[i].m_tRowID - uLastId - 1;
memcpy ( p, &tDelta, iDeltaBytes );
p += iDeltaBytes;
uLastId = m_dFrame[i].m_tRowID;
memcpy ( p, &m_dFrame[i].m_uWeight, iWeightBytes );
p += iWeightBytes;
}
assert ( p==( m_dData.Begin() + m_dData.GetLength() ) );
m_tLastRowID = m_dFrame.Last().m_tRowID;
m_dFrame.Resize(0);
}
void QcacheEntry_c::Finish()
{
FlushFrame();
m_dFrame.Reset();
// convert indexed weights from hash to array
m_dWeights.Resize ( m_hWeights.GetLength() );
m_dWeights.Fill ( -1 );
int64_t i = 0;
int iWeight;
int * pIndex;
while ( ( pIndex = m_hWeights.Iterate ( &i, &iWeight ) )!=NULL )
{
assert ( *pIndex>=0 && *pIndex<m_dWeights.GetLength() );
m_dWeights [ *pIndex ] = iWeight;
}
#ifndef NDEBUG
ARRAY_FOREACH ( iCheck, m_dWeights )
assert ( m_dWeights[iCheck]>=0 );
#endif
m_hWeights.Reset(0);
m_iElapsedMsec = (int)( ( sphMicroTimer() - m_tmStarted + 500 )/1000 );
}
//////////////////////////////////////////////////////////////////////////
Qcache_c::Qcache_c()
{
// defaults are here
m_iMaxBytes = 16777216;
#ifndef NDEBUG
m_iMaxBytes = 0; // disable qcache in debug builds
#endif
m_iThreshMs = 3000;
m_iTtlS = 60;
m_iCachedQueries = 0;
m_iUsedBytes = 0;
m_iHits = 0;
m_iMruHead = -1;
m_hData.Resize ( 256 );
m_hData.Fill ( QCACHE_NO_ENTRY );
m_iMaxQueries = (int)( m_hData.GetLength()*0.7f );
}
Qcache_c::~Qcache_c()
{
ScopedMutex_t dLock ( m_tLock );
ARRAY_FOREACH ( i, m_hData )
if ( IsValidEntry(i) )
SafeRelease ( m_hData[i] );
}
void Qcache_c::Setup ( int64_t iMaxBytes, int iThreshMsec, int iTtlSec )
{
m_iMaxBytes = Max ( iMaxBytes, 0 );
m_iThreshMs = Max ( iThreshMsec, 0 );
m_iTtlS = Max ( iTtlSec, 1 );
EnforceLimits ( false );
}
void Qcache_c::MruToHead ( int iRes )
{
// already the head? nothing to do
if ( iRes==m_iMruHead )
return;
// detach from previous node, prev.next = my.next
QcacheEntry_c * p = m_hData[iRes];
if ( p->m_iMruPrev>=0 )
m_hData [ p->m_iMruPrev ]->m_iMruNext = p->m_iMruNext;
// detach from next node, next.prev = my.prev
if ( p->m_iMruNext>=0 )
m_hData [ p->m_iMruNext ]->m_iMruPrev = p->m_iMruPrev;
// become the new head
p->m_iMruPrev = -1;
p->m_iMruNext = m_iMruHead;
if ( p->m_iMruNext>=0 )
{
assert ( m_hData [ p->m_iMruNext ]->m_iMruPrev<0 );
m_hData [ p->m_iMruNext ]->m_iMruPrev = iRes;
}
m_iMruHead = iRes;
}
static bool CalcFilterHashes ( CSphVector<uint64_t> & dFilters, const CSphQuery & q, const ISphSchema & tSorterSchema )
{
dFilters.Resize(0);
ARRAY_FOREACH ( i, q.m_dFilters )
{
const CSphFilterSettings & tFS = q.m_dFilters[i];
uint64_t uFilterHash = q.m_dFilters[i].GetHash();
// need this cast because ISphExpr::Command is not const
CSphColumnInfo * pAttr = const_cast<CSphColumnInfo *>(tSorterSchema.GetAttr ( tFS.m_sAttrName.cstr() ));
if ( pAttr )
{
if ( pAttr->m_pExpr )
{
bool bDisableCaching = false;
uFilterHash = pAttr->m_pExpr->GetHash ( tSorterSchema, uFilterHash, bDisableCaching );
if ( bDisableCaching )
return false;
} else
uFilterHash = sphCalcLocatorHash ( pAttr->m_tLocator, uFilterHash );
}
dFilters.Add ( uFilterHash );
}
ARRAY_FOREACH ( i, q.m_dFilterTree )
{
dFilters.Add ( q.m_dFilterTree[i].GetHash() );
}
dFilters.Sort();
return true;
}
void Qcache_c::Add ( const CSphQuery & q, QcacheEntry_c * pResult, const ISphSchema & tSorterSchema )
{
pResult->Finish();
	// do not cache queries that are too fast or result sets that are too big, for obvious reasons
// do not cache full scans, because we'll get an incorrect empty result set here
if ( pResult->m_iElapsedMsec < m_iThreshMs || pResult->GetSize() > m_iMaxBytes )
return;
if ( !CanCacheQuery(q) )
return;
if ( !CalcFilterHashes ( pResult->m_dFilters, q, tSorterSchema ) )
return; // this query can't be cached because of the nature of expressions in filters
pResult->AddRef();
pResult->m_Key = GetKey ( pResult->m_iIndexId, q );
ScopedMutex_t dLock (m_tLock);
// rehash if needed
if ( m_iCachedQueries>=m_iMaxQueries )
{
CSphVector<QcacheEntry_c*> hNew ( 2*m_hData.GetLength() );
hNew.Fill ( QCACHE_NO_ENTRY );
CSphVector<int> dRemap ( m_hData.GetLength() );
dRemap.Fill ( -1 );
int iLenMask = hNew.GetLength() - 1;
ARRAY_FOREACH ( i, m_hData )
if ( IsValidEntry(i) )
{
int j = m_hData[i]->m_Key & iLenMask;
while ( hNew[j]!=NULL )
j = ( j+1 ) & iLenMask;
hNew[j] = m_hData[i];
dRemap[i] = j;
}
ARRAY_FOREACH ( i, m_hData )
if ( IsValidEntry(i) )
{
QcacheEntry_c * p = hNew [ dRemap[i] ];
if ( p->m_iMruNext>=0 )
p->m_iMruNext = dRemap [ p->m_iMruNext ];
if ( p->m_iMruPrev>=0 )
p->m_iMruPrev = dRemap [ p->m_iMruPrev ];
}
m_hData.SwapData ( hNew );
m_iMruHead = dRemap [ m_iMruHead ];
m_iMaxQueries *= 2;
}
// add entry
int iLenMask = m_hData.GetLength() - 1;
int j = pResult->m_Key & iLenMask;
while ( IsValidEntry(j) )
j = ( j+1 ) & iLenMask;
m_hData[j] = pResult;
m_iCachedQueries++;
m_iUsedBytes += pResult->GetSize();
MruToHead(j);
dLock.Unlock();
EnforceLimits ( true );
}
QcacheEntry_c * Qcache_c::Find ( int64_t iIndexId, const CSphQuery & q, const ISphSchema & tSorterSchema )
{
if ( m_iMaxBytes<=0 )
return nullptr;
if ( !CanCacheQuery(q) )
return nullptr;
uint64_t k = GetKey ( iIndexId, q );
bool bFilterHashesCalculated = false;
CSphVector<uint64_t> dFilters;
ScopedMutex_t dLock (m_tLock);
int64_t tmMin = sphMicroTimer() - int64_t( m_iTtlS)*1000000;
int iLenMask = m_hData.GetLength() - 1;
int iLoop = m_hData.GetLength();
int iRes = -1;
for ( int i = k & iLenMask; m_hData[i]!=QCACHE_NO_ENTRY && iLoop--!=0; i = ( i+1 ) & iLenMask )
{
// check that entry is alive
QcacheEntry_c * e = m_hData[i]; // shortcut
if ( e==QCACHE_DEAD_ENTRY )
continue;
// check if we need to evict this one based on ttl
if ( e->m_tmStarted < tmMin )
{
DeleteEntry(i);
continue;
}
// check that key matches
if ( e->m_Key!=k )
continue;
// check that filters are compatible (ie. that entry filters are a subset of query filters)
if ( !bFilterHashesCalculated )
{
bFilterHashesCalculated = true;
if ( !CalcFilterHashes ( dFilters, q, tSorterSchema ) )
return nullptr; // this query can't be cached because of the nature of expressions in filters
}
int j = 0;
for ( ; j < e->m_dFilters.GetLength(); j++ )
if ( !dFilters.BinarySearch ( e->m_dFilters[j] ) )
break;
// filters are good, return it
if ( j==e->m_dFilters.GetLength() )
{
iRes = i;
m_iHits++;
break;
}
}
QcacheEntry_c * p = nullptr;
if ( iRes>=0 )
{
p = m_hData[iRes];
p->AddRef();
MruToHead(iRes);
}
return p;
}
uint64_t Qcache_c::GetKey ( int64_t iIndexId, const CSphQuery & q )
{
	// the query cache key combines everything that affects the cached result:
// - index id
// - MATCH() part
// - ranker
uint64_t k = sphFNV64 ( &iIndexId, sizeof(iIndexId) );
k = sphFNV64cont ( q.m_sQuery.cstr(), k );
k = sphFNV64 ( &q.m_eRanker, 1, k );
if ( q.m_eRanker==SPH_RANK_EXPR )
k = sphFNV64cont ( q.m_sRankerExpr.cstr(), k );
if ( q.m_eRanker==SPH_RANK_PLUGIN )
{
k = sphFNV64cont ( q.m_sUDRanker.cstr(), k );
k = sphFNV64cont ( q.m_sUDRankerOpts.cstr(), k );
}
return k;
}
void Qcache_c::DeleteEntry ( int i )
{
assert ( IsValidEntry(i) );
QcacheEntry_c * p = m_hData[i];
// adjust MRU list
if ( p->m_iMruNext>=0 )
m_hData[p->m_iMruNext]->m_iMruPrev = p->m_iMruPrev;
assert ( p->m_iMruPrev>=0 || m_iMruHead==i );
if ( p->m_iMruPrev>=0 )
m_hData[p->m_iMruPrev]->m_iMruNext = p->m_iMruNext;
else
m_iMruHead = p->m_iMruNext;
// adjust stats
m_iCachedQueries--;
m_iUsedBytes -= p->GetSize();
// release entry
p->Release();
m_hData[i] = QCACHE_DEAD_ENTRY;
}
bool Qcache_c::CanCacheQuery ( const CSphQuery & q ) const
{
return q.m_eMode!=SPH_MATCH_FULLSCAN && !q.m_sQuery.IsEmpty();
}
void Qcache_c::EnforceLimits ( bool bSizeOnly )
{
if ( bSizeOnly && m_iUsedBytes<=m_iMaxBytes )
return;
ScopedMutex_t dLock ( m_tLock );
// first, enforce size limits
int iCur = m_iMruHead;
int64_t iBytes = 0;
while ( iCur>=0 && m_iUsedBytes>m_iMaxBytes )
{
assert ( IsValidEntry(iCur) );
int iNext = m_hData[iCur]->m_iMruNext;
if ( iBytes + m_hData[iCur]->GetSize() > m_iMaxBytes )
DeleteEntry ( iCur );
else
iBytes += m_hData[iCur]->GetSize();
iCur = iNext;
}
if ( bSizeOnly )
return;
// if requested, do a full sweep, and recheck ttl and thresh limits
int64_t tmMin = sphMicroTimer() - int64_t( m_iTtlS)*1000000;
ARRAY_FOREACH ( i, m_hData )
if ( IsValidEntry(i) && ( m_hData[i]->m_tmStarted < tmMin || m_hData[i]->m_iElapsedMsec < m_iThreshMs ) )
DeleteEntry(i);
}
void Qcache_c::DeleteIndex ( int64_t iIndexId )
{
ScopedMutex_t dLock ( m_tLock );
ARRAY_FOREACH ( i, m_hData )
if ( IsValidEntry(i) && m_hData[i]->m_iIndexId==iIndexId )
DeleteEntry(i);
}
//////////////////////////////////////////////////////////////////////////
QcacheRanker_c::QcacheRanker_c ( QcacheEntry_c * pEntry, const ISphQwordSetup & tSetup )
: m_pEntry ( pEntry )
{
SafeAddRef ( pEntry );
ResetImpl ( tSetup );
}
void QcacheRanker_c::ResetImpl ( const ISphQwordSetup & tSetup )
{
m_pCur = m_pEntry->m_dData.Begin();
m_pMax = m_pCur + m_pEntry->m_dData.GetLength();
m_uLastId = INVALID_ROWID;
m_pIndex = tSetup.m_pIndex;
m_pCtx = tSetup.m_pCtx;
	for ( auto & tMatch : m_dMatches )
		tMatch.Reset ( tSetup.m_iDynamicRowitems );
}
int QcacheRanker_c::GetMatches()
{
int iRes = 0;
while ( !iRes )
{
// end of buffer? bail
BYTE * p = m_pCur;
if ( p>=m_pMax )
return 0;
// handle delta restart
if ( *p==0x40 )
{
m_uLastId = INVALID_ROWID;
m_pCur++;
continue;
}
// decode next frame header
bool bIndexedWeights = ( *p & 32 )!=0;
int iWeightBytes = 1 + ( ( *p>>3 ) & 3 );
int iDeltaBytes = 1 + ( *p & 7 );
int iMatches = QcacheEntry_c::MAX_FRAME_SIZE;
if ( *p & 128 )
{
iMatches = p[1];
bIndexedWeights = false;
p++;
}
p++;
int iFrameBytes = iMatches * ( iDeltaBytes + iWeightBytes );
if ( p+iFrameBytes > m_pMax )
{
// unexpected frame end? bail
// FIXME? return an error somehow?
m_pCur = m_pMax;
return 0;
}
// decode frame data
for ( int i=0; i<iMatches; i++ )
{
RowID_t uDelta = 0;
memcpy ( &uDelta, p, iDeltaBytes );
p += iDeltaBytes;
m_uLastId += uDelta + 1;
int iWeight = 0;
memcpy ( &iWeight, p, iWeightBytes );
p += iWeightBytes;
if ( bIndexedWeights )
iWeight = m_pEntry->m_dWeights [ iWeight ];
CSphMatch & m = m_dMatches[iRes];
m.m_tRowID = m_uLastId;
m.m_iWeight = iWeight;
// re-filter the cached match with new filters
if ( !m_pIndex->EarlyReject ( m_pCtx, m ) )
iRes++;
}
assert ( p==m_pCur + iFrameBytes + 1 + ( iMatches!=QcacheEntry_c::MAX_FRAME_SIZE ) );
m_pCur = p;
}
return iRes;
}
//////////////////////////////////////////////////////////////////////////
void QcacheAdd ( const CSphQuery & q, QcacheEntry_c * pResult, const ISphSchema & tSorterSchema )
{
return g_Qcache.Add ( q, pResult, tSorterSchema );
}
QcacheEntry_c * QcacheFind ( int64_t iIndexId, const CSphQuery & q, const ISphSchema & tSorterSchema )
{
return g_Qcache.Find ( iIndexId, q, tSorterSchema );
}
std::unique_ptr<ISphRanker> QcacheRanker ( QcacheEntry_c * pEntry, const ISphQwordSetup & tSetup )
{
return std::make_unique<QcacheRanker_c> ( pEntry, tSetup );
}
const QcacheStatus_t & QcacheGetStatus()
{
return g_Qcache;
}
void QcacheSetup ( int64_t iMaxBytes, int iThreshMsec, int iTtlSec )
{
g_Qcache.Setup ( iMaxBytes, iThreshMsec, iTtlSec );
}
void QcacheDeleteIndex ( int64_t iIndexId )
{
g_Qcache.DeleteIndex ( iIndexId );
}
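// A minimal caller-side usage sketch of the public qcache API above (an
// illustrative assumption, not from the original source; iIndexId, tQuery,
// tSchema and tSetup are placeholders supplied by the caller):
//
//	QcacheSetup ( 16777216, 3000, 60 ); // 16 MB cap, 3 s threshold, 60 s TTL
//	QcacheEntry_c * pHit = QcacheFind ( iIndexId, tQuery, tSchema );
//	if ( pHit )
//	{
//		auto pRanker = QcacheRanker ( pHit, tSetup ); // replays cached matches
//		// ... run ranking as usual ...
//		pHit->Release(); // Find() returns an AddRef'ed entry
//	} else
//	{
//		// cache miss: run the search, then QcacheAdd ( tQuery, pNewEntry, tSchema )
//	}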
| 18,251 | C++ | .cpp | 574 | 29.24216 | 119 | 0.648641 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |
| 16,881 | geodist.cpp | manticoresoftware_manticoresearch/src/geodist.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "geodist.h"
#include <cmath>
// conversions between degrees and radians
static const double PI = 3.14159265358979323846;
static const double TO_RAD = PI / 180.0;
static const double TO_RAD2 = PI / 360.0;
static const double TO_DEG = 180.0 / PI;
static const float TO_RADF = (float)( PI / 180.0 );
static const float TO_RADF2 = (float)( PI / 360.0 );
static const float TO_DEGF = (float)( 180.0 / PI );
const int GEODIST_TABLE_COS = 1024; // maxerr 0.00063%
const int GEODIST_TABLE_ASIN = 512;
const int GEODIST_TABLE_K = 1024;
static float g_GeoCos[GEODIST_TABLE_COS+1]; ///< cos(x) table
static float g_GeoAsin[GEODIST_TABLE_ASIN+1]; ///< asin(sqrt(x)) table
static float g_GeoFlatK[GEODIST_TABLE_K+1][2]; ///< GeodistAdaptive() flat ellipsoid method k1,k2 coeffs table
/// double argument squared
static FORCE_INLINE double sqr ( double v )
{
return v * v;
}
void GeodistInit()
{
for ( int i=0; i<=GEODIST_TABLE_COS; i++ )
g_GeoCos[i] = (float)cos ( 2*PI*i/GEODIST_TABLE_COS ); // [0, 2pi] -> [0, COSTABLE]
for ( int i=0; i<=GEODIST_TABLE_ASIN; i++ )
g_GeoAsin[i] = (float)asin ( sqrt ( double(i)/GEODIST_TABLE_ASIN ) ); // [0, 1] -> [0, ASINTABLE]
for ( int i=0; i<=GEODIST_TABLE_K; i++ )
{
double x = PI*i/GEODIST_TABLE_K - PI*0.5; // [-pi/2, pi/2] -> [0, KTABLE]
g_GeoFlatK[i][0] = (float) sqr ( 111132.09 - 566.05*cos ( 2*x ) + 1.20*cos ( 4*x ) );
g_GeoFlatK[i][1] = (float) sqr ( 111415.13*cos(x) - 94.55*cos ( 3*x ) + 0.12*cos ( 5*x ) );
}
}
static const double HAVERSINE_EARTH_RADIUS = 6384000.0;
inline float GeodistSphereRad ( float lat1, float lon1, float lat2, float lon2 )
{
static const double D = 2*HAVERSINE_EARTH_RADIUS;
double dlat2 = 0.5*( lat1 - lat2 );
double dlon2 = 0.5*( lon1 - lon2 );
double a = sqr ( sin(dlat2) ) + cos(lat1)*cos(lat2)*sqr ( sin(dlon2) );
double c = asin ( Min ( 1.0, sqrt(a) ) );
return (float)(D*c);
}
inline float GeodistSphereDeg ( float lat1, float lon1, float lat2, float lon2 )
{
static const double D = 2*HAVERSINE_EARTH_RADIUS;
double dlat2 = TO_RAD2*( lat1 - lat2 );
double dlon2 = TO_RAD2*( lon1 - lon2 );
double a = sqr ( sin(dlat2) ) + cos ( TO_RAD*lat1 )*cos ( TO_RAD*lat2 )*sqr ( sin(dlon2) );
double c = asin ( Min ( 1.0, sqrt(a) ) );
return (float)(D*c);
}
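// quick sanity check (illustrative, coordinates approximate): Moscow to
// St. Petersburg, GeodistSphereDeg ( 55.7558f, 37.6173f, 59.9343f, 30.3351f ),
// returns roughly 634 km; note the slightly inflated 6384 km radius above
// makes results read ~0.2% higher than a mean-radius (6371 km) haversine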
static FORCE_INLINE float GeodistDegDiff ( float f )
{
f = (float)fabs(f);
while ( f>360 )
f -= 360;
if ( f>180 )
f = 360-f;
return f;
}
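// worked examples (derivable from the code above):
//	GeodistDegDiff ( 350.0f ) -> 10.0f
//	GeodistDegDiff ( -190.0f ) -> 170.0f
// i.e. the result is always the shortest angular difference, in [0, 180]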
float GeodistFlatDeg ( float fLat1, float fLon1, float fLat2, float fLon2 )
{
double c1 = cos ( TO_RAD2*( fLat1+fLat2 ) );
double c2 = 2*c1*c1-1; // cos(2*t)
double c3 = c1*(2*c2-1); // cos(3*t)
double k1 = 111132.09 - 566.05*c2;
double k2 = 111415.13*c1 - 94.55*c3;
float dlat = GeodistDegDiff ( fLat1-fLat2 );
float dlon = GeodistDegDiff ( fLon1-fLon2 );
return (float)sqrt ( k1*k1*dlat*dlat + k2*k2*dlon*dlon );
}
static FORCE_INLINE float GeodistFastCos ( float x )
{
auto y = (float)(fabs(x)*GEODIST_TABLE_COS/PI/2);
auto i = int(y);
y -= i;
i &= ( GEODIST_TABLE_COS-1 );
return g_GeoCos[i] + ( g_GeoCos[i+1]-g_GeoCos[i] )*y;
}
static FORCE_INLINE float GeodistFastSin ( float x )
{
auto y = float(fabs(x)*GEODIST_TABLE_COS/PI/2);
auto i = int(y);
y -= i;
i = ( i - GEODIST_TABLE_COS/4 ) & ( GEODIST_TABLE_COS-1 ); // cos(x-pi/2)=sin(x), costable/4=pi/2
return g_GeoCos[i] + ( g_GeoCos[i+1]-g_GeoCos[i] )*y;
}
/// fast implementation of asin(sqrt(x))
/// max error in floats 0.00369%, in doubles 0.00072%
static inline float GeodistFastAsinSqrt ( float x )
{
if ( x<0.122 )
{
// distance under 4546km, Taylor error under 0.00072%
auto y = (float)sqrt(x);
return y + x*y*0.166666666666666f + x*x*y*0.075f + x*x*x*y*0.044642857142857f;
}
if ( x<0.948 )
{
// distance under 17083km, 512-entry LUT error under 0.00072%
x *= GEODIST_TABLE_ASIN;
auto i = int(x);
return g_GeoAsin[i] + ( g_GeoAsin[i+1] - g_GeoAsin[i] )*( x-i );
}
return (float)asin ( sqrt(x) ); // distance over 17083km, just compute honestly
}
/// float argument squared
inline float fsqr ( float v )
{
return v * v;
}
const float ADAPTIVE_EARTH_RADIUS = 6371000.0f;
inline float GeodistAdaptiveDeg ( float lat1, float lon1, float lat2, float lon2 )
{
float dlat = GeodistDegDiff ( lat1-lat2 );
float dlon = GeodistDegDiff ( lon1-lon2 );
if ( dlon<13 )
{
// points are close enough; use flat ellipsoid model
// interpolate sqr(k1), sqr(k2) coefficients using latitudes midpoint
float m = ( lat1+lat2+180 )*GEODIST_TABLE_K/360; // [-90, 90] degrees -> [0, KTABLE] indexes
auto i = int(m);
i &= ( GEODIST_TABLE_K-1 );
float kk1 = g_GeoFlatK[i][0] + ( g_GeoFlatK[i+1][0] - g_GeoFlatK[i][0] )*( m-i );
float kk2 = g_GeoFlatK[i][1] + ( g_GeoFlatK[i+1][1] - g_GeoFlatK[i][1] )*( m-i );
return (float)sqrt ( kk1*dlat*dlat + kk2*dlon*dlon );
} else
{
// points too far away; use haversine
static const float D = 2.0f*ADAPTIVE_EARTH_RADIUS;
float a = fsqr ( GeodistFastSin ( dlat*TO_RADF2 ) ) + GeodistFastCos ( lat1*TO_RADF ) * GeodistFastCos ( lat2*TO_RADF ) * fsqr ( GeodistFastSin ( dlon*TO_RADF2 ) );
return (float)( D*GeodistFastAsinSqrt(a) );
}
}
inline float GeodistAdaptiveRad ( float lat1, float lon1, float lat2, float lon2 )
{
// cut-paste-optimize, maybe?
return GeodistAdaptiveDeg ( lat1*TO_DEGF, lon1*TO_DEGF, lat2*TO_DEGF, lon2*TO_DEGF );
}
void GeoTesselate ( CSphVector<float> & dIn )
{
// 1 minute of latitude, max
// (it varies from 1842.9 to 1861.57 at 0 to 90 respectively)
static const float LAT_MINUTE = 1861.57f;
// 1 minute of longitude in metres, at different latitudes
static const float LON_MINUTE[] =
{
1855.32f, 1848.31f, 1827.32f, 1792.51f, // 0, 5, 10, 15
1744.12f, 1682.50f, 1608.10f, 1521.47f, // 20, 25, 30, 35
1423.23f, 1314.11f, 1194.93f, 1066.57f, // 40, 45, 50, 55
		930.00f, 786.26f, 636.44f, 481.70f, // 60, 65, 70, 75
323.22f, 162.24f, 0.0f // 80, 85, 90
};
// tesselation threshold
// FIXME! make this configurable?
static const float TESSELATE_TRESH = 500000.0f; // 500 km, error under 150m or 0.03%
CSphVector<float> dOut;
for ( int i=0; i<dIn.GetLength(); i+=2 )
{
// add the current vertex in any event
dOut.Add ( dIn[i] );
dOut.Add ( dIn[i+1] );
// get edge lat/lon, convert to radians
bool bLast = ( i==dIn.GetLength()-2 );
float fLat1 = dIn[i];
float fLon1 = dIn[i+1];
float fLat2 = dIn [ bLast ? 0 : (i+2) ];
float fLon2 = dIn [ bLast ? 1 : (i+3) ];
// quick rough geodistance estimation
float fMinLat = Min ( fLat1, fLat2 );
auto iLatBand = (int) floor ( fabs ( fMinLat ) / 5.0f );
iLatBand = iLatBand % 18;
auto d = (float) (60.0f*( LAT_MINUTE*fabs ( fLat1-fLat2 ) + LON_MINUTE [ iLatBand ]*fabs ( fLon1-fLon2 ) ) );
if ( d<=TESSELATE_TRESH )
continue;
// convert to radians
// FIXME! make units configurable
fLat1 *= TO_RADF;
fLon1 *= TO_RADF;
fLat2 *= TO_RADF;
fLon2 *= TO_RADF;
// compute precise geodistance
d = GeodistSphereRad ( fLat1, fLon1, fLat2, fLon2 );
if ( d<=TESSELATE_TRESH )
continue;
int iSegments = (int) ceil ( d / TESSELATE_TRESH );
// compute arc distance
// OPTIMIZE! maybe combine with CalcGeodist?
d = (float)acos ( sin(fLat1)*sin(fLat2) + cos(fLat1)*cos(fLat2)*cos(fLon1-fLon2) );
const auto isd = (float)(1.0f / sin(d));
const auto clat1 = (float)cos(fLat1);
const auto slat1 = (float)sin(fLat1);
const auto clon1 = (float)cos(fLon1);
const auto slon1 = (float)sin(fLon1);
const auto clat2 = (float)cos(fLat2);
const auto slat2 = (float)sin(fLat2);
const auto clon2 = (float)cos(fLon2);
const auto slon2 = (float)sin(fLon2);
for ( int j=1; j<iSegments; j++ )
{
float f = float(j) / float(iSegments); // needed distance fraction
float a = (float)sin ( (1-f)*d ) * isd;
float b = (float)sin ( f*d ) * isd;
float x = a*clat1*clon1 + b*clat2*clon2;
float y = a*clat1*slon1 + b*clat2*slon2;
float z = a*slat1 + b*slat2;
dOut.Add ( (float)( TO_DEG * atan2 ( z, sqrt ( x*x+y*y ) ) ) );
dOut.Add ( (float)( TO_DEG * atan2 ( y, x ) ) );
}
}
// swap 'em results
dIn.SwapData ( dOut );
}
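// A minimal usage sketch (an assumption, not from the original source): feed a
// flat lat/lon list in degrees; edges longer than ~500 km get extra vertices:
//
//	CSphVector<float> dPoly;
//	dPoly.Add ( 0.0f ); dPoly.Add ( 0.0f );  // vertex 1 (lat, lon)
//	dPoly.Add ( 0.0f ); dPoly.Add ( 20.0f ); // vertex 2, ~2200 km along the equator
//	GeoTesselate ( dPoly );                  // dPoly now contains interpolated points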
bool GeodistGetSphereBBox ( Geofunc_fn fnFunc, float fLat, float fLon, float fDist, float & fLatMin, float & fLatMax, float & fLonMin, float & fLonMax )
{
bool bAdaptive = fnFunc==GetGeodistFn ( GEO_ADAPTIVE, true ) || fnFunc==GetGeodistFn ( GEO_ADAPTIVE, false );
bool bDeg = fnFunc==GetGeodistFn ( GEO_ADAPTIVE, true ) || fnFunc==GetGeodistFn ( GEO_HAVERSINE, true );
double fLatRad = fLat;
double fLonRad = fLon;
if ( bDeg )
{
fLatRad *= TO_RAD;
fLonRad *= TO_RAD;
}
double fRadius = HAVERSINE_EARTH_RADIUS;
if ( bAdaptive )
fRadius = ADAPTIVE_EARTH_RADIUS;
const double CORRECTION_LAT = 1.003;
const double CORRECTION_LON = 1.10;
double fAngularDist = (double)fDist / fRadius*CORRECTION_LAT;
fLatMin = fLatRad - fAngularDist;
fLatMax = fLatRad + fAngularDist;
// pole is near
if ( fLatMin <= -PI/2.0 || fLatMax >= PI/2.0 )
return false;
// about 100m
const double EPS = 1.570795e-5;
if ( fabs ( fLatRad - PI/2.0 ) <= EPS || fabs ( fLatRad + PI/2.0 ) <= EPS )
return false;
double fDeltaLon = fAngularDist / cos(fLatRad) * CORRECTION_LON;
fLonMin = float(fLonRad - fDeltaLon);
fLonMax = float(fLonRad + fDeltaLon);
	// crossed the antimeridian (lon = +-180)? wrap around
if ( fLonMin < -PI )
fLonMin += 2*PI;
if ( fLonMax > PI )
fLonMax -= 2*PI;
if ( fLonMin > fLonMax )
Swap ( fLonMin, fLonMax );
// too near to the poles
if ( fLonMin < -PI || fLonMax > PI )
return false;
if ( bDeg )
{
fLatMin *= TO_DEGF;
fLatMax *= TO_DEGF;
fLonMin *= TO_DEGF;
fLonMax *= TO_DEGF;
}
return true;
}
Geofunc_fn GetGeodistFn ( GeoFunc_e eFunc, bool bDeg )
{
switch ( 2*eFunc+bDeg )
{
case 2*GEO_HAVERSINE: return &GeodistSphereRad;
case 2*GEO_HAVERSINE+1: return &GeodistSphereDeg;
case 2*GEO_ADAPTIVE: return &GeodistAdaptiveRad;
case 2*GEO_ADAPTIVE+1: return &GeodistAdaptiveDeg;
default:;
}
return nullptr;
}
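// A minimal usage sketch (an assumption, not from the original source; the
// coordinates are arbitrary, roughly NYC to London):
//	Geofunc_fn fnDist = GetGeodistFn ( GEO_ADAPTIVE, true /*degrees*/ );
//	float fMeters = fnDist ( 40.71f, -74.01f, 51.51f, -0.13f ); // ~5570 km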
float CalcGeodist ( GeoFunc_e eFunc, bool bDeg, float lat1, float lon1, float lat2, float lon2 )
{
return GetGeodistFn ( eFunc, bDeg ) ( lat1, lon1, lat2, lon2 );
}
bool GeoDistanceUnit ( const char * szUnit, float & fCoeff )
{
struct DistanceUnit_t
{
CSphString m_dNames[3];
float m_fConversion;
};
static DistanceUnit_t dUnits[] =
{
{ { "mi", "miles" }, 1609.34f },
{ { "yd", "yards" }, 0.9144f },
{ { "ft", "feet" }, 0.3048f },
{ { "in", "inch" }, 0.0254f },
{ { "km", "kilometers" }, 1000.0f },
{ { "m", "meters" }, 1.0f },
{ { "cm", "centimeters" }, 0.01f },
{ { "mm", "millimeters" }, 0.001f },
{ { "NM", "nmi", "nauticalmiles" }, 1852.0f }
};
if ( !szUnit || !*szUnit )
{
fCoeff = 1.0f;
return true;
}
for ( const auto & i : dUnits )
for ( const auto & j : i.m_dNames )
if ( j==szUnit )
{
fCoeff = i.m_fConversion;
return true;
}
return false;
}
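// e.g. (illustrative; fRadiusUser/fRadiusMeters are hypothetical caller-side names):
//	float fCoeff = 1.0f;
//	if ( GeoDistanceUnit ( "mi", fCoeff ) )
//		fRadiusMeters = fRadiusUser * fCoeff; // 1 mi -> 1609.34 m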
| 11,340 | C++ | .cpp | 323 | 32.755418 | 166 | 0.657443 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,882 | sphinxjsonquery.cpp | manticoresoftware_manticoresearch/src/sphinxjsonquery.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxquery.h"
#include "sphinxsearch.h"
#include "sphinxplugin.h"
#include "sphinxutils.h"
#include "searchdaemon.h"
#include "jsonqueryfilter.h"
#include "attribute.h"
#include "searchdsql.h"
#include "knnmisc.h"
#include "datetime.h"
#include "json/cJSON.h"
static const char * g_szAll = "_all";
static const char * g_szHighlight = "_@highlight_";
static const char * g_szOrder = "_@order_";
class QueryTreeBuilder_c;
struct ErrorPathGuard_t
{
ErrorPathGuard_t ( QueryTreeBuilder_c & tBuilder, bool bEnabled, const JsonObj_c & tPath );
~ErrorPathGuard_t ();
QueryTreeBuilder_c & m_tBuilder;
const bool m_bEnabled;
};
class QueryTreeBuilder_c : public XQParseHelper_c
{
public:
QueryTreeBuilder_c ( const CSphQuery * pQuery, TokenizerRefPtr_c pQueryTokenizerQL, const CSphIndexSettings & tSettings );
void CollectKeywords ( const char * szStr, XQNode_t * pNode, const XQLimitSpec_t & tLimitSpec, float fBoost );
bool HandleFieldBlockStart ( const char * & /*pPtr*/ ) override { return true; }
bool HandleSpecialFields ( const char * & pPtr, FieldMask_t & dFields ) override;
bool NeedTrailingSeparator() override { return false; }
XQNode_t * CreateNode ( XQLimitSpec_t & tLimitSpec );
const TokenizerRefPtr_c & GetQLTokenizer() { return m_pQueryTokenizerQL; }
const CSphIndexSettings & GetIndexSettings() { return m_tSettings; }
const CSphQuery * GetQuery() { return m_pQuery; }
bool m_bHasFulltext = false;
bool m_bHasFilter = false;
void ResetNodesFlags() { m_bHasFulltext = m_bHasFilter = false; }
QueryTreeBuilder_c CreateCollectPath ( const CSphSchema * pSchema );
void ErrorPrintPath ( QueryTreeBuilder_c & tOrig );
ErrorPathGuard_t ErrorAddPath ( const JsonObj_c & tPath );
private:
const CSphQuery * m_pQuery {nullptr};
const TokenizerRefPtr_c m_pQueryTokenizerQL;
const CSphIndexSettings & m_tSettings;
XQNode_t * AddChildKeyword ( XQNode_t * pParent, const char * szKeyword, int iSkippedPosBeforeToken, const XQLimitSpec_t & tLimitSpec, float fBoost );
friend ErrorPathGuard_t;
CSphVector< std::pair<CSphString, const void *> > m_dErrorPath;
bool m_bErrorCollectPath = false;
};
QueryTreeBuilder_c::QueryTreeBuilder_c ( const CSphQuery * pQuery, TokenizerRefPtr_c pQueryTokenizerQL, const CSphIndexSettings & tSettings )
: m_pQuery ( pQuery )
, m_pQueryTokenizerQL ( std::move (pQueryTokenizerQL) )
, m_tSettings ( tSettings )
{}
void QueryTreeBuilder_c::CollectKeywords ( const char * szStr, XQNode_t * pNode, const XQLimitSpec_t & tLimitSpec, float fBoost )
{
m_pTokenizer->SetBuffer ( (const BYTE*)szStr, (int) strlen ( szStr ) );
while (true)
{
int iSkippedPosBeforeToken = 0;
if ( m_bWasBlended )
{
iSkippedPosBeforeToken = m_pTokenizer->SkipBlended();
// just add all skipped blended parts except blended head (already added to atomPos)
if ( iSkippedPosBeforeToken>1 )
m_iAtomPos += iSkippedPosBeforeToken - 1;
}
		// FIXME!!! only wildcard nodes need tokens with wildcard symbols
const char * sToken = (const char *) m_pTokenizer->GetToken ();
if ( !sToken )
{
AddChildKeyword ( pNode, nullptr, iSkippedPosBeforeToken, tLimitSpec, fBoost );
break;
}
// now let's do some token post-processing
m_bWasBlended = m_pTokenizer->TokenIsBlended();
int iPrevDeltaPos = 0;
if ( m_pPlugin && m_pPlugin->m_fnPushToken )
sToken = m_pPlugin->m_fnPushToken ( m_pPluginData, const_cast<char*>(sToken), &iPrevDeltaPos, m_pTokenizer->GetTokenStart(), int ( m_pTokenizer->GetTokenEnd() - m_pTokenizer->GetTokenStart() ) );
m_iAtomPos += 1 + iPrevDeltaPos;
bool bMultiDestHead = false;
bool bMultiDest = false;
int iDestCount = 0;
// do nothing inside phrase
if ( !m_pTokenizer->IsPhraseMode() )
bMultiDest = m_pTokenizer->WasTokenMultiformDestination ( bMultiDestHead, iDestCount );
// check for stopword, and create that node
// temp buffer is required, because GetWordID() might expand (!) the keyword in-place
BYTE sTmp [ MAX_TOKEN_BYTES ];
strncpy ( (char*)sTmp, sToken, MAX_TOKEN_BYTES );
sTmp[MAX_TOKEN_BYTES-1] = '\0';
int iStopWord = 0;
if ( m_pPlugin && m_pPlugin->m_fnPreMorph )
m_pPlugin->m_fnPreMorph ( m_pPluginData, (char*)sTmp, &iStopWord );
SphWordID_t uWordId = iStopWord ? 0 : m_pDict->GetWordID ( sTmp );
if ( uWordId && m_pPlugin && m_pPlugin->m_fnPostMorph )
{
int iRes = m_pPlugin->m_fnPostMorph ( m_pPluginData, (char*)sTmp, &iStopWord );
if ( iStopWord )
uWordId = 0;
else if ( iRes )
uWordId = m_pDict->GetWordIDNonStemmed ( sTmp );
}
if ( !uWordId )
{
sToken = nullptr;
// stopwords with step=0 must not affect pos
if ( m_bEmptyStopword )
m_iAtomPos--;
}
XQNode_t * pChildNode = nullptr;
if ( bMultiDest && !bMultiDestHead )
{
assert ( m_dMultiforms.GetLength() );
m_dMultiforms.Last().m_iDestCount++;
m_dDestForms.Add ( sToken );
} else
pChildNode = AddChildKeyword ( pNode, sToken, iSkippedPosBeforeToken, tLimitSpec, fBoost );
if ( bMultiDestHead )
{
MultiformNode_t & tMulti = m_dMultiforms.Add();
tMulti.m_pNode = pChildNode;
tMulti.m_iDestStart = m_dDestForms.GetLength();
tMulti.m_iDestCount = 0;
}
}
}
bool QueryTreeBuilder_c::HandleSpecialFields ( const char * & pPtr, FieldMask_t & dFields )
{
if ( *pPtr=='_' )
{
auto iLen = (int) strlen(g_szAll);
if ( !strncmp ( pPtr, g_szAll, iLen ) )
{
pPtr += iLen;
dFields.SetAll();
return true;
}
}
return false;
}
XQNode_t * QueryTreeBuilder_c::CreateNode ( XQLimitSpec_t & tLimitSpec )
{
auto * pNode = new XQNode_t(tLimitSpec);
m_dSpawned.Add ( pNode );
return pNode;
}
XQNode_t * QueryTreeBuilder_c::AddChildKeyword ( XQNode_t * pParent, const char * szKeyword, int iSkippedPosBeforeToken, const XQLimitSpec_t & tLimitSpec, float fBoost )
{
XQKeyword_t tKeyword ( szKeyword, m_iAtomPos );
tKeyword.m_iSkippedBefore = iSkippedPosBeforeToken;
tKeyword.m_fBoost = fBoost;
auto * pNode = new XQNode_t ( tLimitSpec );
pNode->m_pParent = pParent;
pNode->m_dWords.Add ( tKeyword );
pParent->m_dChildren.Add ( pNode );
m_dSpawned.Add ( pNode );
return pNode;
}
ErrorPathGuard_t QueryTreeBuilder_c::ErrorAddPath ( const JsonObj_c & tPath )
{
return ErrorPathGuard_t ( *this, m_bErrorCollectPath, tPath );
}
void QueryTreeBuilder_c::ErrorPrintPath ( QueryTreeBuilder_c & tOrig )
{
assert ( IsError() );
StringBuilder_c tBuilder;
tBuilder.Appendf ( "%s at '", tOrig.m_pParsed->m_sParseError.cstr() );
const void * pLast = nullptr;
for ( const auto & tEntry : m_dErrorPath )
{
// skip duplicates
if ( !tEntry.second || pLast!=tEntry.second )
tBuilder.Appendf ( "/%s", tEntry.first.scstr() );
pLast = tEntry.second;
}
tBuilder << "'";
tOrig.m_pParsed->m_sParseError = (CSphString)tBuilder;
}
QueryTreeBuilder_c QueryTreeBuilder_c::CreateCollectPath ( const CSphSchema * pSchema )
{
QueryTreeBuilder_c tOther ( m_pQuery, std::move ( m_pQueryTokenizerQL ), m_tSettings );
tOther.Setup ( pSchema, m_pTokenizer->Clone ( SPH_CLONE ), std::move ( m_pDict ), m_pParsed, m_tSettings );
tOther.m_bErrorCollectPath = true;
tOther.m_dErrorPath.Add ( { "query", nullptr } );
return tOther;
}
ErrorPathGuard_t::ErrorPathGuard_t ( QueryTreeBuilder_c & tBuilder, bool bEnabled, const JsonObj_c & tPath )
: m_tBuilder ( tBuilder )
, m_bEnabled ( bEnabled )
{
// add path entry only in the collect pass and only prior to error point
if ( m_bEnabled && !m_tBuilder.IsError() )
m_tBuilder.m_dErrorPath.Add ( { tPath.Name(), tPath.GetRoot() } );
}
ErrorPathGuard_t::~ErrorPathGuard_t ()
{
if ( m_bEnabled && !m_tBuilder.IsError() )
m_tBuilder.m_dErrorPath.Pop();
}
//////////////////////////////////////////////////////////////////////////
class QueryParserJson_c : public QueryParser_i
{
public:
bool IsFullscan ( const CSphQuery & tQuery ) const final;
bool IsFullscan ( const XQQuery_t & tQuery ) const final;
bool ParseQuery ( XQQuery_t & tParsed, const char * sQuery, const CSphQuery * pQuery, TokenizerRefPtr_c pQueryTokenizer, TokenizerRefPtr_c pQueryTokenizerJson, const CSphSchema * pSchema, const DictRefPtr_c& pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields ) const final;
QueryParser_i * Clone() const final { return new QueryParserJson_c; }
private:
XQNode_t * ConstructMatchNode ( const JsonObj_c & tJson, bool bPhrase, bool bTerms, bool bSingleTerm, QueryTreeBuilder_c & tBuilder ) const;
XQNode_t * ConstructBoolNode ( const JsonObj_c & tJson, QueryTreeBuilder_c & tBuilder ) const;
XQNode_t * ConstructQLNode ( const JsonObj_c & tJson, QueryTreeBuilder_c & tBuilder ) const;
XQNode_t * ConstructMatchAllNode ( QueryTreeBuilder_c & tBuilder ) const;
bool ConstructBoolNodeItems ( const JsonObj_c & tClause, CSphVector<XQNode_t *> & dItems, QueryTreeBuilder_c & tBuilder ) const;
bool ConstructNodeOrFilter ( const JsonObj_c & tItem, CSphVector<XQNode_t *> & dNodes, QueryTreeBuilder_c & tBuilder ) const;
XQNode_t * ConstructNode ( const JsonObj_c & tJson, QueryTreeBuilder_c & tBuilder ) const;
};
bool QueryParserJson_c::IsFullscan ( const CSphQuery & tQuery ) const
{
const char * szQ = tQuery.m_sQuery.cstr();
if ( !szQ ) return true;
if ( strstr ( szQ, R"("match")" ) ) return false;
if ( strstr ( szQ, R"("terms")" ) ) return false;
if ( strstr ( szQ, R"("match_phrase")" ) ) return false;
if ( strstr ( szQ, R"("term")" ) ) return false;
if ( strstr ( szQ, R"("query_string")" ) ) return false;
if ( strstr ( szQ, R"("simple_query_string")" ) ) return false;
return true;
}
bool QueryParserJson_c::IsFullscan ( const XQQuery_t & tQuery ) const
{
return !( tQuery.m_pRoot && ( tQuery.m_pRoot->m_dChildren.GetLength () || tQuery.m_pRoot->m_dWords.GetLength () ) );
}
static bool IsFullText ( const CSphString & sName );
static bool IsBoolNode ( const CSphString & sName );
bool CheckRootNode ( const JsonObj_c & tRoot, CSphString & sError )
{
bool bFilter = false;
bool bBool = false;
bool bFullText = false;
for ( const auto & tItem : tRoot )
{
const CSphString & sName = tItem.Name();
if ( IsFilter ( tItem ) )
{
if ( bFilter )
{
sError = "\"query\" has multiple filter properties, use bool node";
return false;
}
bFilter = true;
}
else if ( IsBoolNode ( sName ) )
{
if ( bBool )
{
sError = "\"query\" has multiple bool properties";
return false;
}
bBool = true;
}
else if ( IsFullText ( sName ) )
{
if ( bFullText )
{
sError = "\"query\" has multiple full-text properties, use bool node";
return false;
}
bFullText = true;
}
}
return true;
}
static JsonObj_c FindFullTextQueryNode ( const JsonObj_c & tRoot )
{
for ( JsonObj_c tChild : tRoot )
{
if ( !IsFilter ( tChild ) )
return tChild;
}
return tRoot[0];
}
bool QueryParserJson_c::ParseQuery ( XQQuery_t & tParsed, const char * szQuery, const CSphQuery * pQuery, TokenizerRefPtr_c pQueryTokenizerQL, TokenizerRefPtr_c pQueryTokenizerJson, const CSphSchema * pSchema, const DictRefPtr_c & pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields ) const
{
JsonObj_c tRoot ( szQuery );
// take only the first item of the query; ignore the rest
int iNumIndexes = ( tRoot.Empty() ? 0 : tRoot.Size() );
if ( !iNumIndexes )
{
tParsed.m_sParseError = "\"query\" property is empty";
return false;
}
if ( iNumIndexes!=1 && !CheckRootNode ( tRoot, tParsed.m_sParseError ) )
return false;
assert ( pQueryTokenizerJson->IsQueryTok() );
DictRefPtr_c pMyDict = GetStatelessDict ( pDict );
QueryTreeBuilder_c tBuilder ( pQuery, std::move ( pQueryTokenizerQL ), tSettings );
tBuilder.Setup ( pSchema, pQueryTokenizerJson->Clone ( SPH_CLONE ), pMyDict, &tParsed, tSettings );
const JsonObj_c tFtNode = FindFullTextQueryNode ( tRoot );
XQNode_t * pRoot = ConstructNode ( tFtNode, tBuilder );
if ( tBuilder.IsError() )
{
tBuilder.Cleanup();
QueryTreeBuilder_c tErrorBuilder { tBuilder.CreateCollectPath ( pSchema ) };
ConstructNode ( tFtNode, tErrorBuilder );
tErrorBuilder.Cleanup();
tErrorBuilder.ErrorPrintPath ( tBuilder );
return false;
}
XQLimitSpec_t tLimitSpec;
pRoot = tBuilder.FixupTree ( pRoot, tLimitSpec, pMorphFields, IsAllowOnlyNot() );
if ( tBuilder.IsError() )
{
tBuilder.Cleanup();
return false;
}
tParsed.m_bSingleWord = ( pRoot && pRoot->m_dChildren.IsEmpty() && pRoot->m_dWords.GetLength() == 1 );
tParsed.m_pRoot = pRoot;
return true;
}
static const char * g_szOperatorNames[]=
{
"and",
"or"
};
static XQOperator_e StrToNodeOp ( const char * szStr )
{
if ( !szStr )
return SPH_QUERY_TOTAL;
int iOp=0;
for ( auto i : g_szOperatorNames )
{
if ( !strcmp ( szStr, i ) )
return XQOperator_e(iOp);
iOp++;
}
return SPH_QUERY_TOTAL;
}
static bool IsBoolNode ( const JsonObj_c & tJson )
{
if ( !tJson )
return false;
return CSphString ( tJson.Name() )=="bool";
}
bool IsBoolNode ( const CSphString & sName )
{
return ( sName=="bool" );
}
static float GetBoost ( const JsonObj_c & tFields )
{
const float fBoostDefault = 1.0f;
if ( !tFields.IsObj() )
return fBoostDefault;
JsonObj_c tBoost = tFields.GetItem ( "boost" );
if ( !tBoost || !tBoost.IsNum() )
return fBoostDefault;
return tBoost.FltVal();
}
XQNode_t * QueryParserJson_c::ConstructMatchNode ( const JsonObj_c & tJson, bool bPhrase, bool bTerms, bool bSingleTerm, QueryTreeBuilder_c & tBuilder ) const
{
ErrorPathGuard_t tGuard = tBuilder.ErrorAddPath ( tJson );
if ( !tJson.IsObj() )
{
tBuilder.Error ( "\"match\" value should be an object" );
return nullptr;
}
if ( tJson.Size()!=1 )
{
tBuilder.Error ( "ill-formed \"match\" property" );
return nullptr;
}
JsonObj_c tFields = tJson[0];
tBuilder.SetString ( tFields.Name() );
XQLimitSpec_t tLimitSpec;
const char * szQuery = nullptr;
XQOperator_e eNodeOp = bPhrase ? SPH_QUERY_PHRASE : SPH_QUERY_OR;
bool bIgnore = false;
StringBuilder_c tTermsBuf ( " " );
if ( !tBuilder.ParseFields ( tLimitSpec.m_dFieldMask, tLimitSpec.m_iFieldMaxPos, bIgnore ) )
return nullptr;
if ( bIgnore )
{
tBuilder.Warning ( R"(ignoring fields in "%s", using "_all")", tFields.Name() );
tLimitSpec.Reset();
}
tLimitSpec.m_bFieldSpec = true;
if ( bTerms )
{
if ( !tFields.IsArray() )
{
tBuilder.Warning ( "values of properties in \"terms\" should be an array" );
return nullptr;
}
for ( const auto & tTerm : tFields )
{
if ( !tTerm.IsStr() )
{
tBuilder.Error ( "\"terms\" value should be a string" );
return nullptr;
}
tTermsBuf += tTerm.SzVal();
}
szQuery = tTermsBuf.cstr();
} else if ( tFields.IsObj() )
{
// matching with flags
CSphString sError;
JsonObj_c tQuery = ( bSingleTerm ? tFields.GetStrItem ( "value", sError ) : tFields.GetStrItem ( "query", sError ) );
if ( !tQuery )
{
tBuilder.Error ( "%s", sError.cstr() );
return nullptr;
}
szQuery = tQuery.SzVal();
if ( !bPhrase )
{
JsonObj_c tOp = tFields.GetItem ( "operator" );
if ( tOp ) // "and", "or"
{
eNodeOp = StrToNodeOp ( tOp.SzVal() );
if ( eNodeOp==SPH_QUERY_TOTAL )
{
tBuilder.Error ( "unknown operator: \"%s\"", tOp.SzVal() );
return nullptr;
}
}
}
} else
{
// simple list of keywords
if ( !tFields.IsStr() )
{
tBuilder.Warning ( "values of properties in \"match\" should be strings or objects" );
return nullptr;
}
szQuery = tFields.SzVal();
}
assert ( szQuery );
XQNode_t * pNewNode = tBuilder.CreateNode ( tLimitSpec );
pNewNode->SetOp ( eNodeOp );
float fBoost = GetBoost ( tFields );
tBuilder.CollectKeywords ( szQuery, pNewNode, tLimitSpec, fBoost );
return pNewNode;
}
bool QueryParserJson_c::ConstructNodeOrFilter ( const JsonObj_c & tItem, CSphVector<XQNode_t *> & dNodes, QueryTreeBuilder_c & tBuilder ) const
{
if ( !tItem )
return true;
// we created filters before, no need to process them again
if ( IsFilter(tItem) )
{
tBuilder.m_bHasFilter = true;
return true;
}
XQNode_t * pNode = ConstructNode ( tItem, tBuilder );
if ( !pNode )
		return IsBoolNode ( tItem ); // need to walk down the tree for compat mode
dNodes.Add ( pNode );
return true;
}
bool QueryParserJson_c::ConstructBoolNodeItems ( const JsonObj_c & tClause, CSphVector<XQNode_t *> & dItems, QueryTreeBuilder_c & tBuilder ) const
{
ErrorPathGuard_t tGuard = tBuilder.ErrorAddPath ( tClause );
if ( tClause.IsArray() )
{
for ( const auto & tObject : tClause )
{
if ( !tObject.IsObj() )
{
tBuilder.Error ( "\"%s\" array value should be an object", tClause.Name() );
return false;
}
if ( !ConstructNodeOrFilter ( tObject[0], dItems, tBuilder ) )
return false;
}
} else if ( tClause.IsObj() )
{
if ( !ConstructNodeOrFilter ( tClause[0], dItems, tBuilder ) )
return false;
} else
{
tBuilder.Error ( "\"%s\" value should be an object or an array", tClause.Name() );
return false;
}
return true;
}
XQNode_t * QueryParserJson_c::ConstructBoolNode ( const JsonObj_c & tJson, QueryTreeBuilder_c & tBuilder ) const
{
ErrorPathGuard_t tGuard = tBuilder.ErrorAddPath ( tJson );
if ( !tJson.IsObj() )
{
tBuilder.Error ( "\"bool\" value should be an object" );
return nullptr;
}
CSphVector<XQNode_t *> dMust, dShould, dMustNot;
for ( const auto & tClause : tJson )
{
tBuilder.ResetNodesFlags();
CSphString sName = tClause.Name();
if ( sName=="must" )
{
if ( !ConstructBoolNodeItems ( tClause, dMust, tBuilder ) )
return nullptr;
} else if ( sName=="should" )
{
if ( !ConstructBoolNodeItems ( tClause, dShould, tBuilder ) )
return nullptr;
if ( tBuilder.m_bHasFilter && tBuilder.m_bHasFulltext )
{
tBuilder.Error ( "filter and full-text can be used together only inside \"must\" node" );
return nullptr;
}
} else if ( sName=="must_not" )
{
if ( !ConstructBoolNodeItems ( tClause, dMustNot, tBuilder ) )
return nullptr;
} else if ( sName=="filter" )
{
if ( !ConstructBoolNodeItems ( tClause, dMust, tBuilder ) )
return nullptr;
} else if ( sName=="minimum_should_match" ) // FIXME!!! add to should as option
{
continue;
} else
{
tBuilder.Error ( "unknown bool query type: \"%s\"", sName.cstr() );
return nullptr;
}
}
XQNode_t * pMustNode = nullptr;
XQNode_t * pShouldNode = nullptr;
XQNode_t * pMustNotNode = nullptr;
XQLimitSpec_t tLimitSpec;
if ( dMust.GetLength() )
{
// no need to construct AND node for a single child
if ( dMust.GetLength()==1 )
pMustNode = dMust[0];
else
{
XQNode_t * pAndNode = tBuilder.CreateNode ( tLimitSpec );
pAndNode->SetOp ( SPH_QUERY_AND );
for ( auto & i : dMust )
{
pAndNode->m_dChildren.Add(i);
i->m_pParent = pAndNode;
}
pMustNode = pAndNode;
}
}
if ( dShould.GetLength() )
{
if ( dShould.GetLength()==1 )
pShouldNode = dShould[0];
else
{
XQNode_t * pOrNode = tBuilder.CreateNode ( tLimitSpec );
pOrNode->SetOp ( SPH_QUERY_OR );
for ( auto & i : dShould )
{
pOrNode->m_dChildren.Add(i);
i->m_pParent = pOrNode;
}
pShouldNode = pOrNode;
}
}
// slightly different case - we need to construct the NOT node anyway
if ( dMustNot.GetLength() )
{
XQNode_t * pNotNode = tBuilder.CreateNode ( tLimitSpec );
pNotNode->SetOp ( SPH_QUERY_NOT );
if ( dMustNot.GetLength()==1 )
{
pNotNode->m_dChildren.Add ( dMustNot[0] );
dMustNot[0]->m_pParent = pNotNode;
} else
{
XQNode_t * pOrNode = tBuilder.CreateNode ( tLimitSpec );
pOrNode->SetOp ( SPH_QUERY_OR );
for ( auto & i : dMustNot )
{
pOrNode->m_dChildren.Add ( i );
i->m_pParent = pOrNode;
}
pNotNode->m_dChildren.Add ( pOrNode );
pOrNode->m_pParent = pNotNode;
}
pMustNotNode = pNotNode;
}
int iTotalNodes = 0;
iTotalNodes += pMustNode ? 1 : 0;
iTotalNodes += pShouldNode ? 1 : 0;
iTotalNodes += pMustNotNode ? 1 : 0;
XQNode_t * pResultNode = nullptr;
if ( !iTotalNodes )
return nullptr;
else if ( iTotalNodes==1 )
{
if ( pMustNode )
pResultNode = pMustNode;
else if ( pShouldNode )
pResultNode = pShouldNode;
else
pResultNode = pMustNotNode;
assert ( pResultNode );
} else
{
pResultNode = pMustNode ? pMustNode : pMustNotNode;
assert ( pResultNode );
// combine 'must' and 'must_not' with AND
if ( pMustNode && pMustNotNode )
{
XQNode_t * pAndNode = tBuilder.CreateNode(tLimitSpec);
pAndNode->SetOp(SPH_QUERY_AND);
pAndNode->m_dChildren.Add ( pMustNode );
pAndNode->m_dChildren.Add ( pMustNotNode );
pMustNode->m_pParent = pAndNode;
pMustNotNode->m_pParent = pAndNode;
pResultNode = pAndNode;
}
// combine 'result' node and 'should' node with MAYBE
if ( pShouldNode )
{
XQNode_t * pMaybeNode = tBuilder.CreateNode ( tLimitSpec );
pMaybeNode->SetOp ( SPH_QUERY_MAYBE );
pMaybeNode->m_dChildren.Add ( pResultNode );
pMaybeNode->m_dChildren.Add ( pShouldNode );
pShouldNode->m_pParent = pMaybeNode;
pResultNode->m_pParent = pMaybeNode;
pResultNode = pMaybeNode;
}
}
return pResultNode;
}
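// Illustrative input/output pair for the combination rules above (an example,
// not from the original source; A..D stand for arbitrary match objects).
// A bool query such as
//	{ "bool": { "must":     [ {"match": A}, {"match": B} ],
//	            "should":   [ {"match": C} ],
//	            "must_not": [ {"match": D} ] } }
// produces the operator tree MAYBE ( AND ( AND ( A, B ), NOT ( D ) ), C )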
XQNode_t * QueryParserJson_c::ConstructQLNode ( const JsonObj_c & tJson, QueryTreeBuilder_c & tBuilder ) const
{
ErrorPathGuard_t tGuard = tBuilder.ErrorAddPath ( tJson );
if ( !tJson.IsStr() )
{
tBuilder.Error ( "\"query_string\" value should be an string" );
return nullptr;
}
XQQuery_t tParsed;
tParsed.m_dZones = tBuilder.GetZone(); // should keep the same zone list for whole tree
// no need to pass morph fields here as upper level does fixup
if ( !sphParseExtendedQuery ( tParsed, tJson.StrVal().cstr(), tBuilder.GetQuery(), tBuilder.GetQLTokenizer(), tBuilder.GetSchema(), tBuilder.GetDict(), tBuilder.GetIndexSettings(), nullptr ) )
{
tBuilder.Error ( "%s", tParsed.m_sParseError.cstr() );
return nullptr;
}
if ( !tParsed.m_sParseWarning.IsEmpty() )
tBuilder.Warning ( "%s", tParsed.m_sParseWarning.cstr() );
XQNode_t * pRoot = tParsed.m_pRoot;
tParsed.m_pRoot = nullptr;
tBuilder.SetZone ( tParsed.m_dZones );
return pRoot;
}
XQNode_t * QueryParserJson_c::ConstructMatchAllNode ( QueryTreeBuilder_c & tBuilder ) const
{
XQLimitSpec_t tLimitSpec;
XQNode_t * pNewNode = tBuilder.CreateNode ( tLimitSpec );
pNewNode->SetOp ( SPH_QUERY_NULL );
return pNewNode;
}
static bool IsFtMatch ( const CSphString & sName )
{
return ( sName=="match" );
}
static bool IsFtTerms ( const CSphString & sName )
{
return ( sName=="terms" );
}
static bool IsFtPhrase ( const CSphString & sName )
{
return ( sName=="match_phrase" );
}
static bool IsFtTerm ( const CSphString & sName )
{
return ( sName=="term" );
}
static bool IsFtMatchAll ( const CSphString & sName )
{
return ( sName=="match_all" );
}
static bool IsFtQueryString ( const CSphString & sName )
{
return ( sName=="query_string" );
}
static bool IsFtQueryStringSimple ( const CSphString & sName )
{
return ( sName=="simple_query_string" );
}
bool IsFullText ( const CSphString & sName )
{
return ( IsFtMatch ( sName ) || IsFtTerms ( sName ) || IsFtPhrase ( sName ) || IsFtTerm ( sName ) || IsFtMatchAll ( sName ) || IsFtQueryString ( sName ) || IsFtQueryStringSimple ( sName ));
}
XQNode_t * QueryParserJson_c::ConstructNode ( const JsonObj_c & tJson, QueryTreeBuilder_c & tBuilder ) const
{
ErrorPathGuard_t tGuard = tBuilder.ErrorAddPath ( tJson );
CSphString sName = tJson.Name();
if ( !tJson || sName.IsEmpty() )
{
tBuilder.Error ( "empty json found" );
return nullptr;
}
bool bMatch = IsFtMatch ( sName );
bool bTerms = IsFtTerms ( sName );
bool bPhrase = IsFtPhrase ( sName );
bool bSingleTerm = IsFtTerm ( sName );
if ( bMatch || bPhrase || bTerms || bSingleTerm )
{
tBuilder.m_bHasFulltext = true;
return ConstructMatchNode ( tJson, bPhrase, bTerms, bSingleTerm, tBuilder );
}
if ( IsFtMatchAll ( sName ) )
{
tBuilder.m_bHasFulltext = true;
return ConstructMatchAllNode ( tBuilder );
}
if ( IsBoolNode ( sName ) )
return ConstructBoolNode ( tJson, tBuilder );
if ( IsFtQueryString ( sName ) )
{
tBuilder.m_bHasFulltext = true;
return ConstructQLNode ( tJson, tBuilder );
}
if ( IsFtQueryStringSimple ( sName ) && tJson.IsObj() )
{
tBuilder.m_bHasFulltext = true;
return ConstructQLNode ( tJson.GetItem ( "query" ), tBuilder );
}
tBuilder.Error ( "unknown full-text node '%s'", sName.cstr() );
return nullptr;
}
bool NonEmptyQuery ( const JsonObj_c & tQuery )
{
	return ( tQuery.HasItem("match")
		|| tQuery.HasItem("match_phrase")
		|| tQuery.HasItem("bool")
		|| tQuery.HasItem("query_string") );
}
//////////////////////////////////////////////////////////////////////////
static bool ParseSnippet ( const JsonObj_c & tSnip, CSphQuery & tQuery, CSphString & sError );
static bool ParseSort ( const JsonObj_c & tSort, JsonQuery_c & tQuery, bool & bGotWeight, CSphString & sError, CSphString & sWarning );
static bool ParseSelect ( const JsonObj_c & tSelect, CSphQuery & tQuery, CSphString & sError );
static bool ParseScriptFields ( const JsonObj_c & tExpr, CSphQuery & tQuery, CSphString & sError );
static bool ParseExpressions ( const JsonObj_c & tExpr, CSphQuery & tQuery, CSphString & sError );
static bool ParseDocFields ( const JsonObj_c & tDocFields, JsonQuery_c & tQuery, CSphString & sError );
static bool ParseAggregates ( const JsonObj_c & tAggs, JsonQuery_c & tQuery, CSphString & sError );
static bool ParseIndex ( const JsonObj_c & tRoot, SqlStmt_t & tStmt, CSphString & sError )
{
if ( !tRoot )
{
sError.SetSprintf ( "unable to parse: %s", tRoot.GetErrorPtr() );
return false;
}
JsonObj_c tIndex = tRoot.GetStrItem ( "table", sError );
if ( !tIndex )
{
tIndex = tRoot.GetStrItem ( "index", sError, true );
if ( !tIndex )
return false;
sError = "";
}
tStmt.m_sIndex = tIndex.StrVal();
tStmt.m_tQuery.m_sIndexes = tStmt.m_sIndex;
const char * sIndexStart = strchr ( tStmt.m_sIndex.cstr(), ':' );
if ( sIndexStart!=nullptr )
{
const char * sIndex = tStmt.m_sIndex.cstr();
sError.SetSprintf ( "wrong table at cluster syntax, use \"cluster\": \"%.*s\" and \"index\": \"%s\" properties, instead of '%s'",
(int)(sIndexStart-sIndex), sIndex, sIndexStart+1, sIndex );
return false;
}
return true;
}
static bool ParseIndexId ( const JsonObj_c & tRoot, bool bArrayIds, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
if ( !ParseIndex ( tRoot, tStmt, sError ) )
return false;
JsonObj_c tId = tRoot.GetItem ( "id" );
if ( tId )
{
if ( !tId.IsInt() && !tId.IsUint() && !tId.IsArray() )
{
sError = "Document ids should be integer or array of integers";
return false;
}
if ( !bArrayIds && tId.IsArray() )
{
sError = "Document ids should be integer";
return false;
}
if ( !tId.IsArray() )
{
if ( tId.IsInt() && tId.IntVal()<0 )
{
sError = "Negative document ids are not allowed";
return false;
}
} else
{
for ( const auto & tItem : tId )
{
if ( !tItem.IsInt() && !tItem.IsUint() )
{
sError = "Document ids should be integer";
return false;
}
if ( tItem.IsInt() && tItem.IntVal()<0 )
{
sError = "Negative document ids are not allowed";
return false;
}
}
}
}
if ( tId && !tId.IsArray() )
tDocId = tId.IntVal();
else
tDocId = 0; // enable auto-id
return true;
}
static bool ParseCluster ( const JsonObj_c & tRoot, SqlStmt_t & tStmt, CSphString & sError )
{
if ( !tRoot )
{
sError.SetSprintf ( "unable to parse: %s", tRoot.GetErrorPtr() );
return false;
}
// cluster is optional
JsonObj_c tCluster = tRoot.GetStrItem ( "cluster", sError, true );
if ( tCluster )
tStmt.m_sCluster = tCluster.StrVal();
return true;
}
std::unique_ptr<QueryParser_i> sphCreateJsonQueryParser()
{
return std::make_unique<QueryParserJson_c>();
}
static bool ParseLimits ( const JsonObj_c & tRoot, CSphQuery & tQuery, CSphString & sError )
{
JsonObj_c tLimit = tRoot.GetIntItem ( "limit", "size", sError );
if ( !sError.IsEmpty() )
return false;
if ( tLimit )
tQuery.m_iLimit = (int)tLimit.IntVal();
JsonObj_c tOffset = tRoot.GetIntItem ( "offset", "from", sError );
if ( !sError.IsEmpty() )
return false;
if ( tOffset )
tQuery.m_iOffset = (int)tOffset.IntVal();
JsonObj_c tCutoff = tRoot.GetIntItem ( "cutoff", sError, true );
if ( !sError.IsEmpty() )
return false;
if ( tCutoff )
tQuery.m_iCutoff = (int)tCutoff.IntVal();
JsonObj_c tMaxMatches = tRoot.GetIntItem ( "max_matches", sError, true );
if ( !sError.IsEmpty() )
return false;
if ( tMaxMatches )
{
tQuery.m_iMaxMatches = (int)tMaxMatches.IntVal();
tQuery.m_bExplicitMaxMatches = true;
}
return true;
}
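// illustrative payloads accepted here, using either the native or the
// ES-style key names: { "limit": 20, "offset": 40 } and { "size": 20,
// "from": 40 } both set m_iLimit=20 and m_iOffset=40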
static bool ParseOptions ( const JsonObj_c & tRoot, CSphQuery & tQuery, CSphString & sError )
{
JsonObj_c tOptions = tRoot.GetItem("options");
if ( !tOptions )
return true;
if ( !tOptions.IsObj() )
{
sError = "\"options\" property value should be an object";
return false;
}
for ( const auto & i : tOptions )
{
AddOption_e eAdd = AddOption_e::NOT_FOUND;
CSphString sOpt = i.Name();
if ( i.IsInt() )
eAdd = AddOption ( tQuery, sOpt, i.StrVal(), i.IntVal(), STMT_SELECT, sError );
else if ( i.IsStr() )
{
CSphString sRanker = i.StrVal();
const char * szRanker = sRanker.cstr();
while ( sphIsAlpha(*szRanker) )
szRanker++;
if ( *szRanker=='(' && sRanker.Ends(")") )
{
int iRankerNameLen = szRanker-sRanker.cstr();
CSphString sExpr = sRanker.SubString (iRankerNameLen+1, sRanker.Length()-iRankerNameLen-2 );
sExpr.Unquote();
sRanker = sRanker.SubString ( 0, iRankerNameLen );
eAdd = ::AddOptionRanker ( tQuery, sOpt, sRanker, [sExpr]{ return sExpr; }, STMT_SELECT, sError );
}
if ( eAdd==AddOption_e::NOT_FOUND )
eAdd = AddOption ( tQuery, sOpt, i.StrVal(), [&i]{ return i.StrVal(); }, STMT_SELECT, sError );
}
else if ( i.IsObj() )
{
CSphVector<CSphNamedInt> dNamed;
for ( const auto & tNamed : i )
{
if ( !tNamed.IsInt() )
{
sError.SetSprintf ( "\"%s\" property of \"%s\"' option should be integer", sOpt.cstr(), tNamed.Name() );
return false;
}
dNamed.Add ( { tNamed.Name(), tNamed.IntVal() } );
}
eAdd = ::AddOption ( tQuery, sOpt, dNamed, STMT_SELECT, sError );
}
if ( eAdd==AddOption_e::NOT_FOUND )
{
sError.SetSprintf ( "unknown option '%s'", sOpt.cstr () );
return false;
}
else if ( eAdd==AddOption_e::FAILED )
return false;
}
return true;
}
static bool ParseKNNQuery ( const JsonObj_c & tJson, CSphQuery & tQuery, CSphString & sError, CSphString & sWarning )
{
if ( !tJson )
return true;
if ( !tJson.IsObj() )
{
sError = "\"knn\" property value should be an object";
return false;
}
if ( !tJson.FetchStrItem ( tQuery.m_sKNNAttr, "field", sError ) ) return false;
if ( !tJson.FetchIntItem ( tQuery.m_iKNNK, "k", sError ) ) return false;
if ( !tJson.FetchIntItem ( tQuery.m_iKnnEf, "ef", sError, true ) ) return false;
JsonObj_c tQueryVec = tJson.GetArrayItem ( "query_vector", sError );
if ( !tQueryVec )
return false;
for ( const auto & tArrayItem : tQueryVec )
{
if ( !tArrayItem.IsInt() && !tArrayItem.IsDbl() )
{
sError = "\"query_vector\" items should be integer of float";
return false;
}
tQuery.m_dKNNVec.Add ( tArrayItem.FltVal() );
}
return true;
}
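// a hypothetical request body this parser accepts (key names are the ones
// fetched above; the values are made up):
//	{ "knn": { "field": "vec", "k": 10, "ef": 200, "query_vector": [0.1, 0.2, 0.3] } }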
static bool ParseOnCond ( const JsonObj_c & tRoot, CSphString & sIdx, CSphString & sAttr, ESphAttr & eType, CSphString & sError )
{
CSphString sType;
if ( !tRoot.FetchStrItem ( sIdx, "table", sError ) ) return false;
if ( !tRoot.FetchStrItem ( sAttr, "field", sError ) ) return false;
if ( !tRoot.FetchStrItem ( sType, "type", sError, true ) ) return false;
if ( !sType.IsEmpty() )
{
if ( sType=="int" || sType=="integer" )
eType = SPH_ATTR_INTEGER;
else if ( sType=="float" )
eType = SPH_ATTR_FLOAT;
else if ( sType=="string" )
eType = SPH_ATTR_STRING;
else
{
sError.SetSprintf ( "unknown \"type\" value: \"%s\"", sType.cstr() );
return false;
}
}
return true;
}
static bool ParseOnFilter ( const JsonObj_c & tRoot, OnFilter_t & tOnFilter, CSphString & sError )
{
if ( !tRoot.IsObj() )
{
sError = "\"on\" items should be objects";
return false;
}
CSphString sOp;
if ( !tRoot.FetchStrItem ( sOp, "operator", sError ) )
return false;
if ( sOp!="eq" )
{
sError = "Unknown \"operator\" value";
return false;
}
JsonObj_c tLeft = tRoot.GetObjItem ( "left", sError );
if ( !tLeft )
return false;
JsonObj_c tRight = tRoot.GetObjItem ( "right", sError );
if ( !tRight )
return false;
if ( !ParseOnCond ( tLeft, tOnFilter.m_sIdx1, tOnFilter.m_sAttr1, tOnFilter.m_eTypeCast1, sError ) )
return false;
if ( !ParseOnCond ( tRight, tOnFilter.m_sIdx2, tOnFilter.m_sAttr2, tOnFilter.m_eTypeCast2, sError ) )
return false;
return true;
}
static bool ParseJoin ( const JsonObj_c & tRoot, CSphQuery & tQuery, CSphString & sError, CSphString & sWarning )
{
JsonObj_c tJoin = tRoot.GetArrayItem ( "join", sError, true );
if ( !tJoin )
return true;
int iNumJoins = 0;
for ( const auto & tJoinItem : tJoin )
{
if ( iNumJoins>0 )
{
sError = "Only single table joins are currently supported";
return false;
}
CSphString sJoinType;
if ( !tJoinItem.FetchStrItem ( sJoinType, "type", sError ) )
return false;
if ( sJoinType=="inner" )
tQuery.m_eJoinType = JoinType_e::INNER;
else if ( sJoinType=="left" )
tQuery.m_eJoinType = JoinType_e::LEFT;
else
{
sError.SetSprintf ( "unknown join type '%s'", sJoinType.cstr() );
return false;
}
if ( !tJoinItem.FetchStrItem ( tQuery.m_sJoinIdx, "table", sError ) )
return false;
JsonObj_c tMatchQuery = tJoinItem.GetObjItem ( "query", sError, true );
if ( tMatchQuery )
tQuery.m_sJoinQuery = tMatchQuery.AsString();
JsonObj_c tOn = tJoinItem.GetArrayItem ( "on", sError );
if ( !tOn )
return false;
for ( const auto & tCond : tOn )
{
OnFilter_t tOnFilter;
if ( !ParseOnFilter ( tCond, tOnFilter, sError ) )
return false;
tQuery.m_dOnFilters.Add(tOnFilter);
}
iNumJoins++;
}
return true;
}
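// a sketch of the expected "join" payload, derived from the keys parsed above
// (table and field names are made up):
//	"join": [ { "type": "left", "table": "t2",
//	            "on": [ { "operator": "eq",
//	                      "left":  { "table": "t1", "field": "id" },
//	                      "right": { "table": "t2", "field": "ref_id" } } ] } ]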
bool sphParseJsonQuery ( Str_t sQuery, ParsedJsonQuery_t & tPJQuery )
{
JsonObj_c tRoot ( sQuery );
tPJQuery.m_tQuery.m_sRawQuery = sQuery;
return sphParseJsonQuery ( tRoot, tPJQuery );
}
bool sphParseJsonQuery ( const JsonObj_c & tRoot, ParsedJsonQuery_t & tPJQuery )
{
TlsMsg::ResetErr();
if ( !tRoot )
return TlsMsg::Err ( "unable to parse: %s", tRoot.GetErrorPtr() );
TLS_MSG_STRING ( sError );
JsonObj_c tIndex = tRoot.GetStrItem ( "table", sError );
if ( !tIndex )
{
tIndex = tRoot.GetStrItem ( "index", sError, true );
if ( !tIndex )
return false;
sError = "";
}
auto & tQuery = tPJQuery.m_tQuery;
tQuery.m_sIndexes = tIndex.StrVal();
if ( tQuery.m_sIndexes==g_szAll )
tQuery.m_sIndexes = "*";
if ( !ParseLimits ( tRoot, tQuery, sError ) )
return false;
JsonObj_c tJsonQuery = tRoot.GetItem("query");
JsonObj_c tKNNQuery = tRoot.GetItem("knn");
if ( tJsonQuery && tKNNQuery )
return TlsMsg::Err ( "\"query\" can't be used together with \"knn\"" );
// common code used by search queries and update/delete by query
if ( !ParseJsonQueryFilters ( tJsonQuery, tQuery, sError, tPJQuery.m_sWarning ) )
return false;
if ( !ParseKNNQuery ( tKNNQuery, tQuery, sError, tPJQuery.m_sWarning ) )
return false;
if ( tKNNQuery && !ParseJsonQueryFilters ( tKNNQuery, tQuery, sError, tPJQuery.m_sWarning ) )
return false;
if ( !ParseJoin ( tRoot, tQuery, sError, tPJQuery.m_sWarning ) )
return false;
if ( !ParseOptions ( tRoot, tQuery, sError ) )
return false;
if ( !tRoot.FetchBoolItem ( tPJQuery.m_bProfile, "profile", sError, true ) )
return false;
if ( !tRoot.FetchIntItem ( tPJQuery.m_iPlan, "plan", sError, true ) )
return false;
// expression columns go first to select list
JsonObj_c tScriptFields = tRoot.GetItem ( "script_fields" );
if ( tScriptFields && !ParseScriptFields ( tScriptFields, tQuery, sError ) )
return false;
	// a synonym for "script_fields"
JsonObj_c tExpressions = tRoot.GetItem ( "expressions" );
if ( tExpressions && !ParseExpressions ( tExpressions, tQuery, sError ) )
return false;
JsonObj_c tSnip = tRoot.GetObjItem ( "highlight", sError, true );
if ( tSnip )
{
if ( !ParseSnippet ( tSnip, tQuery, sError ) )
return false;
}
else if ( !sError.IsEmpty() )
return false;
JsonObj_c tSort = tRoot.GetItem("sort");
if ( tSort && !( tSort.IsArray() || tSort.IsObj() ) )
{
sError = "\"sort\" property value should be an array or an object";
return false;
}
if ( tSort )
{
bool bGotWeight = false;
if ( !ParseSort ( tSort, tQuery, bGotWeight, sError, tPJQuery.m_sWarning ) )
return false;
JsonObj_c tTrackScore = tRoot.GetBoolItem ( "track_scores", sError, true );
if ( !sError.IsEmpty() )
return false;
bool bTrackScore = tTrackScore && tTrackScore.BoolVal();
if ( !bGotWeight && !bTrackScore )
tQuery.m_eRanker = SPH_RANK_NONE;
}
else
{
// set defaults
tQuery.m_eSort = SPH_SORT_EXTENDED;
tQuery.m_sSortBy = "@weight desc";
tQuery.m_sOrderBy = "@weight desc";
}
	// "_source" / select filter
JsonObj_c tSelect = tRoot.GetItem("_source");
bool bParsedSelect = ( !tSelect || ParseSelect ( tSelect, tQuery, sError ) );
if ( !bParsedSelect )
return false;
// docvalue_fields
JsonObj_c tDocFields = tRoot.GetItem ( "docvalue_fields" );
if ( tDocFields && !ParseDocFields ( tDocFields, tQuery, sError ) )
return false;
// aggs
JsonObj_c tAggs = tRoot.GetItem ( "aggs" );
if ( tAggs && !ParseAggregates ( tAggs, tQuery, sError ) )
return false;
return true;
}
bool ParseJsonInsert ( const JsonObj_c & tRoot, SqlStmt_t & tStmt, DocID_t & tDocId, bool bReplace, CSphString & sError )
{
if ( !ParseIndexId ( tRoot, false, tStmt, tDocId, sError ) )
return false;
if ( !ParseCluster ( tRoot, tStmt, sError ) )
return false;
tStmt.m_dInsertSchema.Add ( sphGetDocidName() );
SqlInsert_t & tId = tStmt.m_dInsertValues.Add();
tId.m_iType = SqlInsert_t::CONST_INT;
tId.SetValueInt ( (uint64_t)tDocId, false );
// "doc" is optional
JsonObj_c tSource = tRoot.GetItem("doc");
return ParseJsonInsertSource ( tSource, tStmt, bReplace, sError );
}
static bool ParseJsonInsertSource ( const JsonObj_c & tSource, StrVec_t & dInsertSchema, CSphVector<SqlInsert_t> & dInsertValues, CSphString & sError )
{
if ( !tSource )
return true;
for ( const auto & tItem : tSource )
{
dInsertSchema.Add ( tItem.Name() );
dInsertSchema.Last().ToLower();
SqlInsert_t & tNewValue = dInsertValues.Add();
if ( tItem.IsStr() || tItem.IsNull() )
{
tNewValue.m_iType = ( tItem.IsStr() ? SqlInsert_t::QUOTED_STRING : SqlInsert_t::TOK_NULL );
tNewValue.m_sVal = tItem.StrVal();
} else if ( tItem.IsDbl() )
{
tNewValue.m_iType = SqlInsert_t::CONST_FLOAT;
tNewValue.m_fVal = tItem.FltVal();
} else if ( tItem.IsInt() || tItem.IsBool() || tItem.IsUint() )
{
tNewValue.m_iType = SqlInsert_t::CONST_INT;
tNewValue.SetValueInt ( tItem.IntVal() );
} else if ( tItem.IsArray() || tItem.IsObj() )
{
// could be either an object or an array;
// both fit into a JSON attribute,
// and an array of ints fits an MVA attribute
tNewValue.m_sVal = tItem.AsString();
bool bMVA = false;
if ( tItem.IsArray() )
{
tNewValue.m_iType = SqlInsert_t::CONST_MVA;
tNewValue.m_pVals = new RefcountedVector_c<AttrValue_t>;
for ( const auto & tArrayItem : tItem )
{
if ( !tArrayItem.IsInt() && !tArrayItem.IsDbl() )
break;
tNewValue.m_pVals->Add ( { tArrayItem.IntVal(), tArrayItem.FltVal() } );
bMVA = true;
}
if ( !bMVA && !tItem.Size() )
bMVA = true;
}
if ( !bMVA )
{
tNewValue.m_iType = SqlInsert_t::QUOTED_STRING;
tNewValue.m_pVals = nullptr;
}
} else
{
sError.SetSprintf ( "unsupported value type '%s' in field '%s'", tItem.TypeName(), tItem.Name() );
return false;
}
}
return true;
}
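// A rough summary of how "doc" values map to SqlInsert_t in the loop above:
//   string / null          -> QUOTED_STRING / TOK_NULL
//   double                 -> CONST_FLOAT
//   int / uint / bool      -> CONST_INT
//   array of numbers       -> CONST_MVA (an empty array also counts as MVA)
//   other arrays / objects -> QUOTED_STRING carrying the raw JSON text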
bool ParseJsonInsertSource ( const JsonObj_c & tSource, SqlStmt_t & tStmt, bool bReplace, CSphString & sError )
{
tStmt.m_eStmt = bReplace ? STMT_REPLACE : STMT_INSERT;
if ( !ParseJsonInsertSource ( tSource, tStmt.m_dInsertSchema, tStmt.m_dInsertValues, sError ) )
return false;
if ( !tStmt.CheckInsertIntegrity() )
{
sError = "wrong number of values";
return false;
}
return true;
}
bool sphParseJsonInsert ( const char * szInsert, SqlStmt_t & tStmt, DocID_t & tDocId, bool bReplace, CSphString & sError )
{
JsonObj_c tRoot ( szInsert );
return ParseJsonInsert ( tRoot, tStmt, tDocId, bReplace, sError );
}
static bool ParseUpdateDeleteQueries ( const JsonObj_c & tRoot, bool bDelete, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
tStmt.m_tQuery.m_sSelect = "id";
if ( !ParseIndex ( tRoot, tStmt, sError ) )
return false;
if ( !ParseCluster ( tRoot, tStmt, sError ) )
return false;
JsonObj_c tId = tRoot.GetItem ( "id" );
if ( tId )
{
if ( !ParseIndexId ( tRoot, bDelete, tStmt, tDocId, sError ) )
return false;
CSphFilterSettings & tFilter = tStmt.m_tQuery.m_dFilters.Add();
tFilter.m_eType = SPH_FILTER_VALUES;
if ( bDelete && tId.IsArray() )
{
for ( const auto & tItem : tId )
tFilter.m_dValues.Add ( tItem.IntVal() );
} else
{
tFilter.m_dValues.Add ( tId.IntVal() );
}
tFilter.m_sAttrName = "id";
tDocId = tFilter.m_dValues[0];
}
// "query" is optional
JsonObj_c tQuery = tRoot.GetItem("query");
if ( tQuery && tId )
{
sError = R"(both "id" and "query" specified)";
return false;
}
CSphString sWarning; // fixme: add to results
return ParseJsonQueryFilters ( tQuery, tStmt.m_tQuery, sError, sWarning );
}
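// Illustrative payloads (hypothetical table "t"); "id" and "query" are mutually exclusive:
//   { "table": "t", "id": 1 }                          // single id
//   { "table": "t", "id": [1, 2, 3] }                  // id list (delete only)
//   { "table": "t", "query": { "match": { ... } } }    // filter by full-text query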
bool ParseJsonUpdate ( const JsonObj_c & tRoot, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
CSphAttrUpdate & tUpd = tStmt.AttrUpdate();
tStmt.m_eStmt = STMT_UPDATE;
if ( !ParseUpdateDeleteQueries ( tRoot, false, tStmt, tDocId, sError ) )
return false;
JsonObj_c tSource = tRoot.GetObjItem ( "doc", sError );
if ( !tSource )
return false;
CSphVector<int64_t> dMVA;
for ( const auto & tItem : tSource )
{
bool bFloat = tItem.IsNum();
bool bInt = tItem.IsInt();
bool bBool = tItem.IsBool();
bool bString = tItem.IsStr();
bool bArray = tItem.IsArray();
bool bObject = tItem.IsObj();
if ( !bFloat && !bInt && !bBool && !bString && !bArray && !bObject )
{
sError.SetSprintf ( "unsupported value type '%s' in field '%s'", tItem.TypeName(), tItem.Name() );
return false;
}
CSphString sAttr = tItem.Name();
TypedAttribute_t & tTypedAttr = tUpd.m_dAttributes.Add();
tTypedAttr.m_sName = sAttr.ToLower();
if ( bInt || bBool )
{
int64_t iValue = tItem.IntVal();
tUpd.m_dPool.Add ( (DWORD)iValue );
auto uHi = (DWORD)( iValue>>32 );
if ( uHi )
{
tUpd.m_dPool.Add ( uHi );
tTypedAttr.m_eType = SPH_ATTR_BIGINT;
} else
tTypedAttr.m_eType = SPH_ATTR_INTEGER;
}
else if ( bFloat )
{
auto fValue = tItem.FltVal();
tUpd.m_dPool.Add ( sphF2DW ( fValue ) );
tTypedAttr.m_eType = SPH_ATTR_FLOAT;
}
else if ( bString || bObject )
{
CSphString sEncoded;
const char * szValue = tItem.SzVal();
if ( bObject )
{
sEncoded = tItem.AsString();
szValue = sEncoded.cstr();
}
auto iLength = (int) strlen ( szValue );
tUpd.m_dPool.Add ( tUpd.m_dBlobs.GetLength() );
tUpd.m_dPool.Add ( iLength );
if ( iLength )
{
BYTE * pBlob = tUpd.m_dBlobs.AddN ( iLength+2 ); // a couple of extra \0 for json parser to be happy
memcpy ( pBlob, szValue, iLength );
pBlob[iLength] = 0;
pBlob[iLength+1] = 0;
}
tTypedAttr.m_eType = SPH_ATTR_STRING;
} else if ( bArray )
{
dMVA.Resize ( 0 );
for ( const auto & tArrayItem : tItem )
{
if ( !tArrayItem.IsInt() )
{
sError = "MVA elements should be integers";
return false;
}
dMVA.Add ( tArrayItem.IntVal() );
}
dMVA.Uniq();
tUpd.m_dPool.Add ( dMVA.GetLength()*2 ); // as 64 bit stored into DWORD vector
tTypedAttr.m_eType = SPH_ATTR_UINT32SET;
for ( int64_t uVal : dMVA )
{
if ( uVal>UINT_MAX )
tTypedAttr.m_eType = SPH_ATTR_INT64SET;
*(( int64_t* ) tUpd.m_dPool.AddN ( 2 )) = uVal;
}
}
}
return true;
}
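// An illustrative update payload (attribute names are made up); note that array
// values must hold integers only, as they are treated as MVA updates:
//   { "table": "t", "id": 1, "doc": { "price": 9.90, "flags": 3, "tags": [1, 2, 3] } }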
bool sphParseJsonUpdate ( Str_t sUpdate, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
JsonObj_c tRoot ( sUpdate );
return ParseJsonUpdate ( tRoot, tStmt, tDocId, sError );
}
static bool ParseJsonDelete ( const JsonObj_c & tRoot, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
tStmt.m_eStmt = STMT_DELETE;
return ParseUpdateDeleteQueries ( tRoot, true, tStmt, tDocId, sError );
}
bool sphParseJsonDelete ( Str_t sDelete, SqlStmt_t & tStmt, DocID_t & tDocId, CSphString & sError )
{
JsonObj_c tRoot ( sDelete );
return ParseJsonDelete ( tRoot, tStmt, tDocId, sError );
}
bool sphParseJsonStatement ( const char * szStmt, SqlStmt_t & tStmt, CSphString & sStmt, CSphString & sQuery, DocID_t & tDocId, CSphString & sError )
{
JsonObj_c tRoot ( szStmt );
if ( !tRoot )
{
sError.SetSprintf ( "unable to parse: %s", tRoot.GetErrorPtr() );
return false;
}
JsonObj_c tJsonStmt = tRoot[0];
if ( !tJsonStmt )
{
sError = "no statement found";
return false;
}
sStmt = tJsonStmt.Name();
if ( !tJsonStmt.IsObj() )
{
sError.SetSprintf ( "statement %s should be an object", sStmt.cstr() );
return false;
}
if ( sStmt=="index" || sStmt=="replace" )
{
if ( !ParseJsonInsert ( tJsonStmt, tStmt, tDocId, true, sError ) )
return false;
} else if ( sStmt=="create" || sStmt=="insert" )
{
if ( !ParseJsonInsert ( tJsonStmt, tStmt, tDocId, false, sError ) )
return false;
} else if ( sStmt=="update" )
{
if ( !ParseJsonUpdate ( tJsonStmt, tStmt, tDocId, sError ) )
return false;
} else if ( sStmt=="delete" )
{
if ( !ParseJsonDelete ( tJsonStmt, tStmt, tDocId, sError ) )
return false;
} else
{
sError.SetSprintf ( "unknown bulk operation: %s", sStmt.cstr() );
return false;
}
sQuery = tJsonStmt.AsString();
return true;
}
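// Illustrative bulk statements, one JSON object per call (hypothetical table "t"):
//   { "insert": { "table": "t", "id": 1, "doc": { "title": "hello" } } }
//   { "update": { "table": "t", "id": 1, "doc": { "title": "world" } } }
//   { "delete": { "table": "t", "id": 1 } }
// "index" and "replace" map to STMT_REPLACE; "create" and "insert" to STMT_INSERT.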
//////////////////////////////////////////////////////////////////////////
static void PackedShortMVA2Json ( StringBuilder_c & tOut, const BYTE * pMVA )
{
auto dMVA = sphUnpackPtrAttr ( pMVA );
auto nValues = dMVA.second / sizeof ( DWORD );
auto pValues = ( const DWORD * ) dMVA.first;
for ( int i = 0; i<(int) nValues; ++i )
tOut.NtoA(pValues[i]);
}
static void PackedWideMVA2Json ( StringBuilder_c & tOut, const BYTE * pMVA )
{
auto dMVA = sphUnpackPtrAttr ( pMVA );
auto nValues = dMVA.second / sizeof ( int64_t );
auto pValues = ( const int64_t * ) dMVA.first;
for ( int i = 0; i<(int) nValues; ++i )
tOut.NtoA(pValues[i]);
}
static void PackedFloatVec2Json ( StringBuilder_c & tOut, const BYTE * pFV )
{
auto tFV = sphUnpackPtrAttr(pFV);
int iNumValues = tFV.second / sizeof(float);
auto pValues = (const float *)tFV.first;
for ( int i = 0; i<iNumValues; i++ )
tOut.FtoA(pValues[i]);
}
static void JsonObjAddAttr ( JsonEscapedBuilder & tOut, ESphAttr eAttrType, const CSphMatch & tMatch, const CSphAttrLocator & tLoc, int iMulti=1 )
{
switch ( eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_TOKENCOUNT:
case SPH_ATTR_BIGINT:
tOut.NtoA ( tMatch.GetAttr(tLoc) * iMulti );
break;
case SPH_ATTR_UINT64:
tOut.NtoA ( (uint64_t)tMatch.GetAttr(tLoc) * iMulti );
break;
case SPH_ATTR_FLOAT:
tOut.FtoA ( tMatch.GetAttrFloat(tLoc) * iMulti );
break;
case SPH_ATTR_DOUBLE:
tOut.DtoA ( tMatch.GetAttrDouble(tLoc) * iMulti );
break;
case SPH_ATTR_BOOL:
tOut << ( tMatch.GetAttr ( tLoc ) ? "true" : "false" );
break;
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET_PTR:
case SPH_ATTR_FLOAT_VECTOR_PTR:
{
auto _ = tOut.Array ();
const auto * pMVA = ( const BYTE * ) tMatch.GetAttr ( tLoc );
if ( eAttrType==SPH_ATTR_UINT32SET_PTR )
PackedShortMVA2Json ( tOut, pMVA );
else if ( eAttrType==SPH_ATTR_INT64SET_PTR )
PackedWideMVA2Json ( tOut, pMVA );
else
PackedFloatVec2Json ( tOut, pMVA );
}
break;
case SPH_ATTR_STRINGPTR:
{
const auto * pString = ( const BYTE * ) tMatch.GetAttr ( tLoc );
auto dString = sphUnpackPtrAttr ( pString );
// special processing for legacy typed strings
if ( dString.second>1 && dString.first[dString.second-2]=='\0')
{
auto uSubtype = dString.first[dString.second-1];
dString.second -= 2;
switch ( uSubtype)
{
case 1: // ql
{
ScopedComma_c sBrackets ( tOut, nullptr, R"({"ql":)", "}" );
tOut.AppendEscapedWithComma (( const char* ) dString.first, dString.second);
break;
}
case 0: // json
tOut << ( const char* ) dString.first;
break;
default:
tOut.Sprintf ("\"internal error! wrong subtype of stringptr %d\"", uSubtype );
}
break;
}
tOut.AppendEscapedWithComma ( ( const char * ) dString.first, dString.second );
}
break;
case SPH_ATTR_JSON_PTR:
{
const auto * pJSON = ( const BYTE * ) tMatch.GetAttr ( tLoc );
auto dJson = sphUnpackPtrAttr ( pJSON );
// no object at all? return NULL
if ( IsEmpty ( dJson ) )
tOut << "null";
else
sphJsonFormat ( tOut, dJson.first );
}
break;
case SPH_ATTR_FACTORS:
case SPH_ATTR_FACTORS_JSON:
{
const auto * pFactors = ( const BYTE * ) tMatch.GetAttr ( tLoc );
auto dFactors = sphUnpackPtrAttr ( pFactors );
if ( IsEmpty ( dFactors ))
tOut << "null";
else
sphFormatFactors ( tOut, (const unsigned int *) dFactors.first, true );
}
break;
case SPH_ATTR_JSON_FIELD_PTR:
{
const auto * pField = ( const BYTE * ) tMatch.GetAttr ( tLoc );
auto dField = sphUnpackPtrAttr ( pField );
if ( IsEmpty ( dField ))
{
tOut << "null";
break;
}
auto eJson = ESphJsonType ( *dField.first++ );
if ( eJson==JSON_NULL )
tOut << "null";
else
sphJsonFieldFormat ( tOut, dField.first, eJson, true );
}
break;
default:
assert ( 0 && "Unknown attribute" );
break;
}
}
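// For reference, the legacy typed-string layout handled above: the blob ends with
// '\0' plus a one-byte subtype, e.g. "abc\0\x01" renders as {"ql":"abc"} (subtype 1),
// while subtype 0 means the payload is emitted as raw JSON.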
static void JsonObjAddAttr ( JsonEscapedBuilder & tOut, ESphAttr eAttrType, const char * szCol, const CSphMatch & tMatch, const CSphAttrLocator & tLoc )
{
assert ( sphPlainAttrToPtrAttr ( eAttrType )==eAttrType );
tOut.AppendName ( szCol );
JsonObjAddAttr ( tOut, eAttrType, tMatch, tLoc );
}
static bool IsHighlightAttr ( const CSphString & sName )
{
return sName.Begins ( g_szHighlight );
}
static bool NeedToSkipAttr ( const CSphString & sName, const CSphQuery & tQuery )
{
const char * szName = sName.cstr();
if ( szName[0]=='i' && szName[1]=='d' && szName[2]=='\0' ) return true;
if ( sName.Begins ( g_szHighlight ) ) return true;
if ( sName.Begins ( GetFilterAttrPrefix() ) ) return true;
if ( sName.Begins ( g_szOrder ) ) return true;
if ( sName.Begins ( GetKnnDistAttrName() ) ) return true;
if ( !tQuery.m_dIncludeItems.GetLength() && !tQuery.m_dExcludeItems.GetLength () )
return false;
// empty include - shows all select list items
// exclude with only "*" - skip all select list items
bool bInclude = ( tQuery.m_dIncludeItems.GetLength()==0 );
for ( const auto &iItem: tQuery.m_dIncludeItems )
{
if ( sphWildcardMatch ( szName, iItem.cstr() ) )
{
bInclude = true;
break;
}
}
if ( bInclude && tQuery.m_dExcludeItems.GetLength() )
{
for ( const auto& iItem: tQuery.m_dExcludeItems )
{
if ( sphWildcardMatch ( szName, iItem.cstr() ) )
{
bInclude = false;
break;
}
}
}
return !bInclude;
}
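// Include/exclude semantics in short: an empty include list shows every select-list
// item; e.g. includes ["title*"] with excludes ["*_raw"] keeps "title_text" but drops
// "title_raw", since a matching exclude always wins over a matching include.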
namespace { // static
void EncodeHighlight ( const CSphMatch & tMatch, int iAttr, const ISphSchema & tSchema, JsonEscapedBuilder & tOut )
{
const CSphColumnInfo & tCol = tSchema.GetAttr(iAttr);
ScopedComma_c tHighlightComma ( tOut, ",", R"("highlight":{)", "}", false );
auto dSnippet = sphUnpackPtrAttr ((const BYTE *) tMatch.GetAttr ( tCol.m_tLocator ));
SnippetResult_t tRes = UnpackSnippetData ( dSnippet );
for ( const auto & tField : tRes.m_dFields )
{
tOut.AppendName ( tField.m_sName.cstr() );
ScopedComma_c tHighlight ( tOut, ",", "[", "]", false );
// we might want to add passage separators to field text here
for ( const auto & tPassage : tField.m_dPassages )
tOut.AppendEscapedWithComma ( (const char *)tPassage.m_dText.Begin(), tPassage.m_dText.GetLength() );
}
}
static const char * GetName ( const CSphString & sName )
{
return sName.cstr();
}
static const char * GetName ( const JsonDocField_t & tDF )
{
return tDF.m_sName.cstr();
}
template <typename T>
void EncodeFields ( const CSphVector<T> & dFields, const AggrResult_t & tRes, const CSphMatch & tMatch, const ISphSchema & tSchema,
bool bValArray, const char * sPrefix, const char * sEnd, JsonEscapedBuilder & tOut )
{
JsonEscapedBuilder tDFVal;
tOut.StartBlock ( ",", sPrefix, sEnd );
for ( const T & tDF : dFields )
{
const CSphColumnInfo * pCol = tSchema.GetAttr ( GetName ( tDF ) );
if ( !pCol )
{
tOut += R"("Default")";
continue;
}
// FIXME!!! add format support
tDFVal.Clear();
JsonObjAddAttr ( tDFVal, pCol->m_eAttrType, tMatch, pCol->m_tLocator );
if ( bValArray )
tOut.Sprintf ( "%s", tDFVal.cstr() );
else
tOut.Sprintf ( R"("%s":["%s"])", GetName ( tDF ), tDFVal.cstr() );
}
tOut.FinishBlock ( false ); // close obj
}
struct CompositeLocator_t
{
ESphAttr m_eAttrType = SPH_ATTR_NONE;
CSphAttrLocator m_tLocator;
const char * m_sName = nullptr;
CompositeLocator_t ( const CSphColumnInfo & tCol, const char * sName )
: m_eAttrType ( tCol.m_eAttrType )
, m_tLocator ( tCol.m_tLocator )
, m_sName ( sName )
{}
CompositeLocator_t() = default;
};
struct AggrKeyTrait_t
{
const CSphColumnInfo * m_pKey = nullptr;
CSphVector<CompositeLocator_t> m_dCompositeKeys;
bool m_bKeyed = false;
RangeNameHash_t m_tRangeNames;
};
static bool GetAggrKey ( const JsonAggr_t & tAggr, const CSphSchema & tSchema, int iAggrItem, int iNow, AggrKeyTrait_t & tRes )
{
if ( tAggr.m_eAggrFunc==Aggr_e::NONE )
{
tRes.m_pKey = tSchema.GetAttr ( tAggr.m_sCol.cstr() );
} else if ( tAggr.m_eAggrFunc==Aggr_e::COMPOSITE )
{
for ( const auto & tItem : tAggr.m_dComposite )
{
const CSphColumnInfo * pCol = tSchema.GetAttr ( tItem.m_sColumn.cstr() );
CSphString sJsonCol;
if ( !pCol && sphJsonNameSplit ( tItem.m_sColumn.cstr(), nullptr, &sJsonCol ) )
pCol = tSchema.GetAttr ( sJsonCol.cstr() );
if ( !pCol )
return false;
tRes.m_dCompositeKeys.Add ( CompositeLocator_t ( *pCol, tItem.m_sAlias.cstr() ) );
}
} else
{
tRes.m_pKey = tSchema.GetAttr ( GetAggrName ( iAggrItem, tAggr.m_sCol ).cstr() );
switch ( tAggr.m_eAggrFunc )
{
case Aggr_e::RANGE:
GetRangeKeyNames ( tAggr.m_tRange, tRes.m_tRangeNames );
tRes.m_bKeyed = tAggr.m_tRange.m_bKeyed;
break;
case Aggr_e::DATE_RANGE:
GetRangeKeyNames ( tAggr.m_tDateRange, iNow, tRes.m_tRangeNames );
tRes.m_bKeyed = tAggr.m_tDateRange.m_bKeyed;
break;
case Aggr_e::HISTOGRAM:
tRes.m_bKeyed = tAggr.m_tHist.m_bKeyed;
break;
case Aggr_e::DATE_HISTOGRAM:
tRes.m_bKeyed = tAggr.m_tDateHist.m_bKeyed;
break;
default:
break;
}
}
return ( tRes.m_pKey || tRes.m_dCompositeKeys.GetLength() );
}
static const char * GetBucketPrefix ( const AggrKeyTrait_t & tKey, Aggr_e eAggrFunc, const RangeKeyDesc_t * pRange, const CSphMatch & tMatch, JsonEscapedBuilder & tPrefixBucketBlock )
{
const char * sPrefix = "{";
if ( tKey.m_bKeyed )
{
switch ( eAggrFunc )
{
case Aggr_e::RANGE:
case Aggr_e::DATE_RANGE:
{
tPrefixBucketBlock.Clear();
tPrefixBucketBlock.Appendf ( "\"%s\":{", pRange->m_sKey.cstr() );
sPrefix = tPrefixBucketBlock.cstr();
}
break;
case Aggr_e::HISTOGRAM:
{
tPrefixBucketBlock.Clear();
tPrefixBucketBlock.Appendf ( "\"");
JsonObjAddAttr ( tPrefixBucketBlock, tKey.m_pKey->m_eAttrType, tMatch, tKey.m_pKey->m_tLocator );
tPrefixBucketBlock.Appendf ( "\":{" );
sPrefix = tPrefixBucketBlock.cstr();
}
break;
case Aggr_e::DATE_HISTOGRAM:
{
tPrefixBucketBlock.Clear();
tPrefixBucketBlock.Appendf ( "\"");
time_t tSrcTime = tMatch.GetAttr ( tKey.m_pKey->m_tLocator );
FormatDate ( tSrcTime, tPrefixBucketBlock );
tPrefixBucketBlock.Appendf ( "\":{" );
sPrefix = tPrefixBucketBlock.cstr();
}
break;
default: break;
}
}
return sPrefix;
}
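// Resulting bucket shapes, roughly: with "keyed":true buckets form a named object,
//   "buckets": { "cheap": { "doc_count": 10 }, ... }
// while the default is an array of { "key": ..., "doc_count": ... } objects.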
static void PrintKey ( const AggrKeyTrait_t & tKey, Aggr_e eAggrFunc, const RangeKeyDesc_t * pRange, const CSphMatch & tMatch, ResultSetFormat_e eFormat, const sph::StringSet & hDatetime, JsonEscapedBuilder & tBuf, JsonEscapedBuilder & tOut )
{
if ( eAggrFunc==Aggr_e::DATE_RANGE )
{
if ( !tKey.m_bKeyed )
tOut.Sprintf ( R"("key":"%s")", pRange->m_sKey.cstr() );
if ( !pRange->m_sFrom.IsEmpty() )
tOut.Sprintf ( R"("from":"%s")", pRange->m_sFrom.cstr() );
if ( !pRange->m_sTo.IsEmpty() )
tOut.Sprintf ( R"("to":"%s")", pRange->m_sTo.cstr() );
} else if ( eAggrFunc==Aggr_e::RANGE )
{
if ( !tKey.m_bKeyed )
tOut.Sprintf ( R"("key":"%s")", pRange->m_sKey.cstr() );
if ( !pRange->m_sFrom.IsEmpty() )
tOut.Sprintf ( R"("from":%s)", pRange->m_sFrom.cstr() );
if ( !pRange->m_sTo.IsEmpty() )
tOut.Sprintf ( R"("to":%s)", pRange->m_sTo.cstr() );
} else if ( eAggrFunc==Aggr_e::DATE_HISTOGRAM )
{
tBuf.Clear();
JsonObjAddAttr ( tBuf, tKey.m_pKey->m_eAttrType, tMatch, tKey.m_pKey->m_tLocator );
tOut.Sprintf ( R"("key":%s)", tBuf.cstr() );
tBuf.Clear();
time_t tSrcTime = tMatch.GetAttr ( tKey.m_pKey->m_tLocator );
FormatDate ( tSrcTime, tBuf );
tOut.Sprintf ( R"("key_as_string":"%s")", tBuf.cstr() );
} else if ( eAggrFunc==Aggr_e::COMPOSITE )
{
ScopedComma_c sBlock ( tOut, ",", R"("key":{)", "}" );
for ( const auto & tItem : tKey.m_dCompositeKeys )
JsonObjAddAttr ( tOut, tItem.m_eAttrType, tItem.m_sName, tMatch, tItem.m_tLocator );
} else if ( eFormat==ResultSetFormat_e::MntSearch )
{
JsonObjAddAttr ( tOut, tKey.m_pKey->m_eAttrType, "key", tMatch, tKey.m_pKey->m_tLocator );
} else
{
// FIXME!!! remove after a proper data type is added; for now datetime values must be multiplied by 1000 for the compat aggs result set
int iMulti = 1;
if ( eFormat==ResultSetFormat_e::ES && hDatetime [ tKey.m_pKey->m_sName ] )
iMulti = 1000;
tBuf.Clear();
JsonObjAddAttr ( tBuf, tKey.m_pKey->m_eAttrType, tMatch, tKey.m_pKey->m_tLocator, iMulti );
tOut.Sprintf ( R"("key":%s)", tBuf.cstr() );
if ( tKey.m_pKey->m_eAttrType==SPH_ATTR_STRINGPTR )
tOut.Sprintf ( R"("key_as_string":%s)", tBuf.cstr() );
else
tOut.Sprintf ( R"("key_as_string":"%s")", tBuf.cstr() );
}
}
static VecTraits_T<CSphMatch> GetResultMatches ( const VecTraits_T<CSphMatch> & dMatches, const CSphSchema & tSchema, int iOff, int iCount, const JsonAggr_t & tAggr )
{
bool bHasCompositeAfter = ( dMatches.GetLength() && tAggr.m_eAggrFunc==Aggr_e::COMPOSITE && tAggr.m_dCompositeAfterKey.GetLength() );
if ( !bHasCompositeAfter )
return dMatches.Slice ( iOff, iCount );
CSphString sError;
CreateFilterContext_t tCtx;
tCtx.m_pFilters = &tAggr.m_dCompositeAfterKey;
tCtx.m_pMatchSchema = &tSchema;
tCtx.m_bScan = true;
if ( !sphCreateFilters ( tCtx, sError, sError ) || !sError.IsEmpty() )
{
sphWarning ( "failed to create \"after\" filter: %s", sError.cstr() );
return dMatches.Slice ( iOff, iCount );
}
int iFound = dMatches.GetFirst ( [&] ( const CSphMatch & tMatch ) { return tCtx.m_pFilter->Eval ( tMatch ); } );
if ( iFound<0 )
return dMatches.Slice ( iOff, iCount );
else
return dMatches.Slice ( iFound+1, iCount );
}
static bool IsSingleValue ( Aggr_e eAggr )
{
return ( eAggr==Aggr_e::MIN || eAggr==Aggr_e::MAX || eAggr==Aggr_e::SUM || eAggr==Aggr_e::AVG );
}
static void EncodeAggr ( const JsonAggr_t & tAggr, int iAggrItem, const AggrResult_t & tRes, ResultSetFormat_e eFormat, const sph::StringSet & hDatetime, int iNow, const CSphString & sDistinctName, JsonEscapedBuilder & tOut )
{
if ( tAggr.m_eAggrFunc==Aggr_e::COUNT )
return;
const CSphColumnInfo * pCount = tRes.m_tSchema.GetAttr ( "count(*)" );
AggrKeyTrait_t tKey;
bool bHasKey = GetAggrKey ( tAggr, tRes.m_tSchema, iAggrItem, iNow, tKey );
const CSphColumnInfo * pDistinct = nullptr;
if ( !sDistinctName.IsEmpty() )
pDistinct = tRes.m_tSchema.GetAttr ( sDistinctName.cstr() );
// might be null for empty result set
auto dMatches = GetResultMatches ( tRes.m_dResults.First().m_dMatches, tRes.m_tSchema, tRes.m_iOffset, tRes.m_iCount, tAggr );
CSphString sBucketName;
sBucketName.SetSprintf ( R"("%s":{)", tAggr.m_sBucketName.cstr() );
tOut.StartBlock ( ",", sBucketName.cstr(), "}" );
// aggr.significant
switch ( tAggr.m_eAggrFunc )
{
case Aggr_e::SIGNIFICANT: // FIXME!!! add support
tOut.Appendf ( "\"doc_count\":" INT64_FMT ",", tRes.m_iTotalMatches );
tOut.Appendf ( "\"bg_count\":" INT64_FMT ",", tRes.m_iTotalMatches );
break;
default: break;
}
// after_key for aggr.composite
if ( bHasKey && pCount && tAggr.m_eAggrFunc==Aggr_e::COMPOSITE && dMatches.GetLength() )
{
tOut.StartBlock ( ",", R"("after_key":{)", "}" );
for ( const auto & tItem : tKey.m_dCompositeKeys )
JsonObjAddAttr ( tOut, tItem.m_eAttrType, tItem.m_sName, dMatches.Last(), tItem.m_tLocator );
tOut.FinishBlock ( false ); // after_key obj
}
if ( !IsSingleValue ( tAggr.m_eAggrFunc ) )
{
// buckets might be named objects or array
if ( tKey.m_bKeyed )
tOut.StartBlock ( ",", R"("buckets":{)", "}" );
else
tOut.StartBlock ( ",", R"("buckets":[)", "]" );
// might be null for empty result set
if ( bHasKey && pCount )
{
JsonEscapedBuilder tPrefixBucketBlock;
JsonEscapedBuilder tBufMatch;
for ( const CSphMatch & tMatch : dMatches )
{
RangeKeyDesc_t * pRange = nullptr;
if ( tAggr.m_eAggrFunc==Aggr_e::RANGE || tAggr.m_eAggrFunc==Aggr_e::DATE_RANGE )
{
int iBucket = tMatch.GetAttr ( tKey.m_pKey->m_tLocator );
pRange = tKey.m_tRangeNames ( iBucket );
// skip buckets with an out-of-range index, i.e. _all
if ( !pRange )
continue;
}
// bucket item is array item or dict item
const char * sBucketPrefix = GetBucketPrefix ( tKey, tAggr.m_eAggrFunc, pRange, tMatch, tPrefixBucketBlock );
ScopedComma_c sBucketBlock ( tOut, ",", sBucketPrefix, "}" );
PrintKey ( tKey, tAggr.m_eAggrFunc, pRange, tMatch, eFormat, hDatetime, tBufMatch, tOut );
JsonObjAddAttr ( tOut, pCount->m_eAttrType, "doc_count", tMatch, pCount->m_tLocator );
// FIXME!!! add support
if ( tAggr.m_eAggrFunc==Aggr_e::SIGNIFICANT )
{
tOut.Sprintf ( R"("score":0.001)" );
JsonObjAddAttr ( tOut, pCount->m_eAttrType, "bg_count", tMatch, pCount->m_tLocator );
}
if ( pDistinct )
JsonObjAddAttr ( tOut, pDistinct->m_eAttrType, pDistinct->m_sName.cstr(), tMatch, pDistinct->m_tLocator );
}
}
tOut.FinishBlock ( false ); // buckets array
} else
{
if ( bHasKey && pCount && dMatches.GetLength() )
{
const CSphMatch & tMatch = dMatches[0];
JsonObjAddAttr ( tOut, tKey.m_pKey->m_eAttrType, "value", tMatch, tKey.m_pKey->m_tLocator );
}
}
tOut.FinishBlock ( false ); // named bucket obj
}
void JsonRenderAccessSpecs ( JsonEscapedBuilder & tRes, const bson::Bson_c & tBson, bool bWithZones )
{
using namespace bson;
{
ScopedComma_c sFieldsArray ( tRes, ",", "\"fields\":[", "]" );
Bson_c ( tBson.ChildByName ( SZ_FIELDS ) ).ForEach ( [&tRes] ( const NodeHandle_t & tNode ) {
tRes.AppendEscapedWithComma ( String ( tNode ).cstr() );
} );
}
int iPos = (int)Int ( tBson.ChildByName ( SZ_MAX_FIELD_POS ) );
if ( iPos )
tRes.Sprintf ( "\"max_field_pos\":%d", iPos );
if ( !bWithZones )
return;
auto tZones = tBson.GetFirstOf ( { SZ_ZONES, SZ_ZONESPANS } );
ScopedComma_c dZoneDelim ( tRes, ", ", ( tZones.first==1 ) ? "\"zonespans\":[" : "\"zones\":[", "]" );
Bson_c ( tZones.second ).ForEach ( [&tRes] ( const NodeHandle_t & tNode ) {
tRes << String ( tNode );
} );
}
bool JsonRenderKeywordNode ( JsonEscapedBuilder & tRes, const bson::Bson_c& tBson )
{
using namespace bson;
auto tWord = tBson.ChildByName ( SZ_WORD );
if ( IsNullNode ( tWord ) )
return false;
ScopedComma_c sRoot ( tRes.Object() );
tRes << R"("type":"KEYWORD")";
tRes << "\"word\":";
tRes.AppendEscapedSkippingComma ( String ( tWord ).cstr () );
tRes.Sprintf ( R"("querypos":%d)", Int ( tBson.ChildByName ( SZ_QUERYPOS ) ) );
if ( Bool ( tBson.ChildByName ( SZ_EXCLUDED ) ) )
tRes << R"("excluded":true)";
if ( Bool ( tBson.ChildByName ( SZ_EXPANDED ) ) )
tRes << R"("expanded":true)";
if ( Bool ( tBson.ChildByName ( SZ_FIELD_START ) ) )
tRes << R"("field_start":true)";
if ( Bool ( tBson.ChildByName ( SZ_FIELD_END ) ) )
tRes << R"("field_end":true)";
if ( Bool ( tBson.ChildByName ( SZ_MORPHED ) ) )
tRes << R"("morphed":true)";
auto tBoost = tBson.ChildByName ( SZ_BOOST );
if ( !IsNullNode ( tBoost ) )
{
auto fBoost = Double ( tBoost );
if ( fBoost!=1.0f ) // really comparing floats?
tRes.Sprintf ( R"("boost":%f)", fBoost );
}
return true;
}
void FormatJsonPlanFromBson ( JsonEscapedBuilder& tOut, bson::NodeHandle_t dBson, PLAN_FLAVOUR ePlanFlavour )
{
using namespace bson;
if ( dBson==nullnode )
return;
if ( ePlanFlavour == PLAN_FLAVOUR::EDESCR )
{
auto dRootBlock = tOut.ObjectBlock();
tOut << "\"description\":";
tOut.AppendEscapedSkippingComma ( sph::RenderBsonPlanBrief ( dBson ).cstr() );
tOut.FinishBlocks ( dRootBlock );
return;
}
Bson_c tBson ( dBson );
if ( JsonRenderKeywordNode ( tOut, tBson) )
return;
auto dRootBlock = tOut.ObjectBlock();
tOut << "\"type\":";
tOut.AppendEscapedSkippingComma ( String ( tBson.ChildByName ( SZ_TYPE ) ).cstr() );
if ( ePlanFlavour==PLAN_FLAVOUR::EBOTH )
{
tOut << "\"description\":";
tOut.AppendEscapedSkippingComma ( sph::RenderBsonPlanBrief ( dBson ).cstr () );
}
Bson_c ( tBson.ChildByName ( SZ_OPTIONS ) ).ForEach ( [&tOut] ( CSphString&& sName, const NodeHandle_t & tNode ) {
tOut.Sprintf ( R"("options":"%s=%d")", sName.cstr (), (int) Int ( tNode ) );
} );
JsonRenderAccessSpecs ( tOut, dBson, true );
tOut.StartBlock ( ",", "\"children\":[", "]" );
Bson_c ( tBson.ChildByName ( SZ_CHILDREN ) ).ForEach ( [&] ( const NodeHandle_t & tNode ) {
FormatJsonPlanFromBson ( tOut, tNode, ePlanFlavour );
} );
tOut.FinishBlocks ( dRootBlock );
}
} // static
CSphString JsonEncodeResultError ( const CSphString & sError, int iStatus )
{
JsonEscapedBuilder tOut;
CSphString sResult;
tOut.StartBlock ( ",", "{ \"error\":", "}" );
tOut.AppendEscaped ( sError.cstr(), EscBld::eEscape );
tOut.AppendName ( "status" );
tOut << iStatus;
tOut.FinishBlock ( false );
tOut.MoveTo ( sResult ); // move rather than copy: a plain return of tOut.cstr() would copy the string
return sResult;
}
static CSphString JsonEncodeResultError ( const CSphString & sError, const char * sErrorType=nullptr, int * pStatus=nullptr, const char * sIndex=nullptr )
{
JsonEscapedBuilder tOut;
CSphString sResult;
tOut.StartBlock ( ",", "{", "}" );
tOut.StartBlock ( ",", R"("error":{)", "}" );
tOut.AppendName ( "type" );
tOut.AppendEscaped ( ( sErrorType ? sErrorType : "Error" ), EscBld::eEscape );
tOut.AppendName ( "reason" );
tOut.AppendEscaped ( sError.cstr(), EscBld::eEscape );
if ( sIndex )
{
tOut.AppendName ( "table" );
tOut.AppendEscaped ( sIndex, EscBld::eEscape );
}
tOut.FinishBlock ( false );
if ( pStatus )
{
tOut.AppendName ( "status" );
tOut << *pStatus;
}
tOut.FinishBlock ( false );
tOut.MoveTo ( sResult ); // move rather than copy: a plain return of tOut.cstr() would copy the string
return sResult;
}
CSphString JsonEncodeResultError ( const CSphString & sError, const char * sErrorType, int iStatus )
{
return JsonEncodeResultError ( sError, sErrorType, &iStatus );
}
CSphString JsonEncodeResultError ( const CSphString & sError, const char * sErrorType, int iStatus, const char * sIndex )
{
return JsonEncodeResultError ( sError, sErrorType, &iStatus, sIndex );
}
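// Sample output of the error encoders above (values are illustrative):
//   { "error": { "type": "Error", "reason": "unknown table 't'", "table": "t" }, "status": 500 }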
CSphString HandleShowProfile ( const QueryProfile_c& p )
{
#define SPH_QUERY_STATE( _name, _desc ) _desc,
static const char* dStates[SPH_QSTATE_TOTAL] = { SPH_QUERY_STATES };
#undef SPH_QUERY_STATE
JsonEscapedBuilder sProfile;
int64_t tmTotal = 0;
int iCount = 0;
for ( int i = 0; i < SPH_QSTATE_TOTAL; ++i )
{
if ( p.m_dSwitches[i] <= 0 )
continue;
tmTotal += p.m_tmTotal[i];
iCount += p.m_dSwitches[i];
}
{
auto arrayw = sProfile.ArrayW();
for ( int i = 0; i < SPH_QSTATE_TOTAL; ++i )
{
if ( p.m_dSwitches[i] <= 0 )
continue;
auto _ = sProfile.ObjectW();
sProfile.NamedString ( "status", dStates[i] );
sProfile.NamedVal ( "duration", FixedFrac_T<int64_t, 6> ( p.m_tmTotal[i] ) );
sProfile.NamedVal ( "switches", p.m_dSwitches[i] );
sProfile.NamedVal ( "percent", FixedFrac_T<int64_t, 2> ( PercentOf ( p.m_tmTotal[i], tmTotal, 2 ) ) );
}
{
auto _ = sProfile.ObjectW();
sProfile.NamedString ( "status", "total" );
sProfile.NamedVal ( "duration", FixedFrac_T<int64_t, 6> ( tmTotal ) );
sProfile.NamedVal ( "switches", iCount );
sProfile.NamedVal ( "percent", FixedFrac_T<int64_t, 2> ( PercentOf ( tmTotal, tmTotal, 2 ) ) );
}
}
return (CSphString)sProfile;
}
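// A sketch of the profile array built above (numbers are illustrative):
//   [ { "status": "eval", "duration": 0.000256, "switches": 12, "percent": 34.10 },
//     ...
//     { "status": "total", "duration": 0.000750, "switches": 35, "percent": 100.00 } ]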
CSphString sphEncodeResultJson ( const VecTraits_T<const AggrResult_t *> & dRes, const JsonQuery_c & tQuery, QueryProfile_c * pProfile, ResultSetFormat_e eFormat )
{
assert ( dRes.GetLength()>=1 );
assert ( dRes[0]!=nullptr );
const AggrResult_t & tRes = *dRes[0];
if ( !tRes.m_iSuccesses )
return JsonEncodeResultError ( tRes.m_sError );
JsonEscapedBuilder tOut;
CSphString sResult;
tOut.ObjectBlock();
tOut.Sprintf (R"("took":%d,"timed_out":false)", tRes.m_iQueryTime);
if ( !tRes.m_sWarning.IsEmpty() )
{
tOut.StartBlock ( nullptr, R"("warning":{"reason":)", "}" );
tOut.AppendEscapedWithComma ( tRes.m_sWarning.cstr () );
tOut.FinishBlock ( false );
}
if ( eFormat==ResultSetFormat_e::ES )
tOut += R"("_shards":{ "total": 1, "successful": 1, "skipped": 0, "failed": 0 })";
auto sHitMeta = tOut.StartBlock ( ",", R"("hits":{)", "}" );
tOut.Sprintf ( R"("total":%d)", tRes.m_iTotalMatches );
tOut.Sprintf ( R"("total_relation":%s)", tRes.m_bTotalMatchesApprox ? R"("gte")" : R"("eq")" );
if ( eFormat==ResultSetFormat_e::ES )
tOut += R"("max_score": null)";
const ISphSchema & tSchema = tRes.m_tSchema;
CSphVector<BYTE> dTmp;
CSphBitvec tAttrsToSend;
sphGetAttrsToSend ( tSchema, false, true, tAttrsToSend );
int iHighlightAttr = -1;
int nSchemaAttrs = tSchema.GetAttrsCount();
CSphBitvec dSkipAttrs ( nSchemaAttrs );
for ( int iAttr=0; iAttr<nSchemaAttrs; iAttr++ )
{
if ( !tAttrsToSend.BitGet(iAttr) )
continue;
const CSphColumnInfo & tCol = tSchema.GetAttr(iAttr);
const CSphString & sName = tCol.m_sName;
if ( IsHighlightAttr ( sName ) )
iHighlightAttr = iAttr;
if ( NeedToSkipAttr ( sName, tQuery ) )
dSkipAttrs.BitSet ( iAttr );
if ( eFormat==ResultSetFormat_e::ES && tCol.m_eAttrType==SPH_ATTR_TOKENCOUNT )
dSkipAttrs.BitSet ( iAttr );
}
tOut.StartBlock ( ",", R"("hits":[)", "]" );
const CSphColumnInfo * pId = tSchema.GetAttr ( sphGetDocidName() );
const CSphColumnInfo * pKNNDist = tSchema.GetAttr ( GetKnnDistAttrName() );
bool bCompatId = false;
const CSphColumnInfo * pCompatRaw = nullptr;
const CSphColumnInfo * pCompatVer = nullptr;
if ( eFormat==ResultSetFormat_e::ES )
{
const CSphColumnInfo * pCompatId = tSchema.GetAttr ( "_id" );
if ( pCompatId )
{
bCompatId = true;
pId = pCompatId;
}
pCompatRaw = tSchema.GetAttr ( "_raw" );
pCompatVer = tSchema.GetAttr ( "_version" );
}
bool bTag = tRes.m_bTagsAssigned;
int iTag = ( bTag ? 0 : tRes.m_dResults.First().m_iTag );
auto dMatches = tRes.m_dResults.First ().m_dMatches.Slice ( tRes.m_iOffset, tRes.m_iCount );
for ( const auto & tMatch : dMatches )
{
ScopedComma_c sQueryComma ( tOut, ",", "{", "}" );
// note: the original protocol uses a string UID; here the docid is emitted as a plain number for "_id"
if ( bCompatId )
{
JsonObjAddAttr ( tOut, pId->m_eAttrType, "_id", tMatch, pId->m_tLocator );
tOut.Sprintf ( R"("_score":%d)", tMatch.m_iWeight );
}
else if ( pId )
{
DocID_t tDocID = tMatch.GetAttr ( pId->m_tLocator );
tOut.Sprintf ( R"("_id":%U,"_score":%d)", tDocID, tMatch.m_iWeight );
}
else
tOut.Sprintf ( R"("_score":%d)", tMatch.m_iWeight );
if ( eFormat==ResultSetFormat_e::ES )
{
tOut.Sprintf ( R"("_index":"%s")", tRes.m_dIndexNames[bTag ? tMatch.m_iTag : iTag].scstr() ); // FIXME!!! breaks for multiple indexes
tOut += R"("_type": "doc")";
if ( pCompatVer )
JsonObjAddAttr ( tOut, pCompatVer->m_eAttrType, "_version", tMatch, pCompatVer->m_tLocator );
else
tOut += R"("_version": 1)";
}
if ( pKNNDist )
tOut.Sprintf( R"("_knn_dist":%f)", tMatch.GetAttrFloat ( pKNNDist->m_tLocator ) );
tOut.StartBlock ( ",", "\"_source\":{", "}");
if ( pCompatRaw )
JsonObjAddAttr ( tOut, pCompatRaw->m_eAttrType, "_raw", tMatch, pCompatRaw->m_tLocator );
else
for ( int iAttr=0; iAttr<nSchemaAttrs; iAttr++ )
{
if ( !tAttrsToSend.BitGet(iAttr) )
continue;
if ( dSkipAttrs.BitGet ( iAttr ) )
continue;
const CSphColumnInfo & tCol = tSchema.GetAttr(iAttr);
JsonObjAddAttr ( tOut, tCol.m_eAttrType, tCol.m_sName.cstr(), tMatch, tCol.m_tLocator );
}
tOut.FinishBlock ( false ); // _source obj
if ( iHighlightAttr!=-1 )
EncodeHighlight ( tMatch, iHighlightAttr, tSchema, tOut );
if ( eFormat==ResultSetFormat_e::ES )
{
if ( tQuery.m_dDocFields.GetLength() )
EncodeFields ( tQuery.m_dDocFields, tRes, tMatch, tSchema, false, R"("fields":{)", "}", tOut );
if ( tQuery.m_dSortFields.GetLength() )
EncodeFields ( tQuery.m_dSortFields, tRes, tMatch, tSchema, true, R"("sort":[)", "]", tOut );
}
}
tOut.FinishBlocks ( sHitMeta, false ); // hits array, hits meta
if ( dRes.GetLength()>1 )
{
sph::StringSet hDatetime;
if ( eFormat==ResultSetFormat_e::ES )
{
tQuery.m_dDocFields.for_each ( [&hDatetime]( const auto & tDocfield )
{
if ( tDocfield.m_bDateTime )
hDatetime.Add ( tDocfield.m_sName );
});
}
CSphString sDistinctName;
tQuery.m_dItems.any_of ( [&]( const CSphQueryItem & tItem ) {
if ( tItem.m_sExpr=="@distinct" )
{
sDistinctName = tItem.m_sAlias;
return true;
} else
return false;
});
assert ( dRes.GetLength()==tQuery.m_dAggs.GetLength()+1 );
tOut.StartBlock ( ",", R"("aggregations":{)", "}");
ARRAY_FOREACH ( i, tQuery.m_dAggs )
EncodeAggr ( tQuery.m_dAggs[i], i, *dRes[i+1], eFormat, hDatetime, tQuery.m_iNow, sDistinctName, tOut );
tOut.FinishBlock ( false ); // aggregations obj
}
if ( eFormat==ResultSetFormat_e::ES )
tOut += R"("status": 200)";
if ( pProfile && pProfile->m_bNeedProfile )
{
auto sProfile = HandleShowProfile ( *pProfile );
tOut.Sprintf ( R"("profile":{"query":%s})", sProfile.cstr () );
}
if ( pProfile && pProfile->m_eNeedPlan != PLAN_FLAVOUR::ENONE )
{
JsonEscapedBuilder sPlan;
FormatJsonPlanFromBson ( sPlan, bson::MakeHandle ( pProfile->m_dPlan ), pProfile->m_eNeedPlan );
if ( sPlan.IsEmpty() )
tOut << R"("plan":null)";
else
tOut.Sprintf ( R"("plan":{"query":%s})", sPlan.cstr() );
}
tOut.FinishBlocks();
tOut.MoveTo ( sResult );
return sResult;
}
JsonObj_c sphEncodeInsertResultJson ( const char * szIndex, bool bReplace, DocID_t tDocId, ResultSetFormat_e eFormat )
{
JsonObj_c tObj;
tObj.AddStr ( ( eFormat==ResultSetFormat_e::ES ? "_index" : "table" ), szIndex );
tObj.AddUint ( "_id", tDocId );
tObj.AddBool ( "created", !bReplace );
tObj.AddStr ( "result", bReplace ? "updated" : "created" );
tObj.AddInt ( "status", bReplace ? 200 : 201 );
return tObj;
}
JsonObj_c sphEncodeTxnResultJson ( const char* szIndex, DocID_t tDocId, int iInserts, int iDeletes, int iUpdates, ResultSetFormat_e eFormat )
{
JsonObj_c tObj;
tObj.AddStr ( ( eFormat==ResultSetFormat_e::ES ? "_index" : "table" ), szIndex );
tObj.AddInt ( "_id", tDocId );
tObj.AddInt ( "created", iInserts );
tObj.AddInt ( "deleted", iDeletes );
tObj.AddInt ( "updated", iUpdates );
bool bReplaced = (iInserts!=0 && iDeletes!=0);
tObj.AddStr ( "result", bReplaced ? "updated" : "created" );
tObj.AddInt ( "status", bReplaced ? 200 : 201 );
return tObj;
}
JsonObj_c sphEncodeUpdateResultJson ( const char * szIndex, DocID_t tDocId, int iAffected, ResultSetFormat_e eFormat )
{
JsonObj_c tObj;
tObj.AddStr ( ( eFormat==ResultSetFormat_e::ES ? "_index" : "table" ), szIndex );
if ( !tDocId )
tObj.AddInt ( "updated", iAffected );
else
{
tObj.AddInt ( "_id", tDocId );
tObj.AddStr ( "result", iAffected ? "updated" : "noop" );
}
return tObj;
}
JsonObj_c sphEncodeDeleteResultJson ( const char * szIndex, DocID_t tDocId, int iAffected, ResultSetFormat_e eFormat )
{
JsonObj_c tObj;
tObj.AddStr ( ( eFormat==ResultSetFormat_e::ES ? "_index" : "table" ), szIndex );
if ( !tDocId )
tObj.AddInt ( "deleted", iAffected );
else
{
tObj.AddInt ( "_id", tDocId );
tObj.AddBool ( "found", !!iAffected );
tObj.AddStr ( "result", iAffected ? "deleted" : "not found" );
}
return tObj;
}
JsonObj_c sphEncodeInsertErrorJson ( const char * szIndex, const char * szError, ResultSetFormat_e eFormat )
{
JsonObj_c tObj, tErr;
tErr.AddStr ( "type", szError );
tErr.AddStr ( ( eFormat==ResultSetFormat_e::ES ? "_index" : "table" ), szIndex );
tObj.AddItem ( "error", tErr );
tObj.AddInt ( "status", HttpGetStatusCodes ( EHTTP_STATUS::_409 ) );
return tObj;
}
bool sphGetResultStats ( const char * szResult, int & iAffected, int & iWarnings, bool bUpdate )
{
JsonObj_c tJsonRoot ( szResult );
if ( !tJsonRoot )
return false;
// no warnings in json results for now
iWarnings = 0;
if ( tJsonRoot.HasItem("error") )
{
iAffected = 0;
return true;
}
// its either update or delete
CSphString sError;
JsonObj_c tAffected = tJsonRoot.GetIntItem ( bUpdate ? "updated" : "deleted", sError );
if ( tAffected )
{
iAffected = (int)tAffected.IntVal();
return true;
}
// it was probably a query with an "_id"
JsonObj_c tId = tJsonRoot.GetIntItem ( "_id", sError );
if ( tId )
{
iAffected = 1;
return true;
}
return false;
}
//////////////////////////////////////////////////////////////////////////
// Highlight
static void FormatSnippetOpts ( const CSphString & sQuery, const SnippetQuerySettings_t & tSnippetQuery, CSphQuery & tQuery )
{
StringBuilder_c sItem;
sItem << "HIGHLIGHT(";
sItem << tSnippetQuery.AsString();
sItem << ",";
auto & hFieldHash = tSnippetQuery.m_hPerFieldLimits;
if ( tSnippetQuery.m_hPerFieldLimits.GetLength() )
{
sItem.StartBlock ( ",", "'", "'" );
for ( const auto& tField : hFieldHash )
sItem << tField.first;
sItem.FinishBlock(false);
}
else
sItem << "''";
if ( !sQuery.IsEmpty() )
sItem.Appendf ( ",'%s'", sQuery.cstr() );
sItem << ")";
CSphQueryItem & tItem = tQuery.m_dItems.Add();
tItem.m_sExpr = sItem.cstr ();
tItem.m_sAlias.SetSprintf ( "%s", g_szHighlight );
}
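// The generated select-list item looks roughly like this (options and fields vary),
// aliased to g_szHighlight:
//   HIGHLIGHT({ before_match='<b>', ... }, 'title,body', 'running shoes')
// with '' in the fields position when no per-field limits were given.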
static bool ParseFieldsArray ( const JsonObj_c & tFields, SnippetQuerySettings_t & tSettings, CSphString & sError )
{
for ( const auto & tField : tFields )
{
if ( !tField.IsStr() )
{
sError.SetSprintf ( "\"%s\" field should be an string", tField.Name() );
return false;
}
SnippetLimits_t tDefault;
tSettings.m_hPerFieldLimits.Add( tDefault, tField.StrVal() );
}
return true;
}
static bool ParseSnippetLimitsElastic ( const JsonObj_c & tSnip, SnippetLimits_t & tLimits, CSphString & sError )
{
if ( !tSnip.FetchIntItem ( tLimits.m_iLimit, "fragment_size", sError, true ) ) return false;
if ( !tSnip.FetchIntItem ( tLimits.m_iLimitPassages, "number_of_fragments", sError, true ) ) return false;
return true;
}
static bool ParseSnippetLimitsSphinx ( const JsonObj_c & tSnip, SnippetLimits_t & tLimits, CSphString & sError )
{
if ( !tSnip.FetchIntItem ( tLimits.m_iLimit, "limit", sError, true ) ) return false;
if ( !tSnip.FetchIntItem ( tLimits.m_iLimitPassages, "limit_passages", sError, true ) ) return false;
if ( !tSnip.FetchIntItem ( tLimits.m_iLimitPassages, "limit_snippets", sError, true ) ) return false;
if ( !tSnip.FetchIntItem ( tLimits.m_iLimitWords, "limit_words", sError, true ) ) return false;
return true;
}
static bool ParseFieldsObject ( const JsonObj_c & tFields, SnippetQuerySettings_t & tSettings, CSphString & sError )
{
for ( const auto & tField : tFields )
{
if ( !tField.IsObj() )
{
sError.SetSprintf ( "\"%s\" field should be an object", tField.Name() );
return false;
}
SnippetLimits_t & tLimits = tSettings.m_hPerFieldLimits.AddUnique ( tField.Name() );
if ( !ParseSnippetLimitsElastic ( tField, tLimits, sError ) )
return false;
if ( !ParseSnippetLimitsSphinx ( tField, tLimits, sError ) )
return false;
}
return true;
}
static bool ParseSnippetFields ( const JsonObj_c & tSnip, SnippetQuerySettings_t & tSettings, CSphString & sError )
{
JsonObj_c tFields = tSnip.GetItem("fields");
if ( !tFields )
return true;
if ( tFields.IsArray() )
return ParseFieldsArray ( tFields, tSettings, sError );
if ( tFields.IsObj() )
return ParseFieldsObject ( tFields, tSettings, sError );
sError = R"("fields" property value should be an array or an object)";
return false;
}
static bool FetchTags ( const char * sName, const JsonObj_c & tSnip, CSphString & sVal, CSphString & sError )
{
JsonObj_c tTag = tSnip.GetItem ( sName );
if ( !tTag )
return true;
if ( tTag.IsStr() )
{
sVal = tTag.StrVal();
return true;
}
if ( tTag.IsArray() )
{
if ( tTag.Size() )
sVal = tTag[0].StrVal();
return true;
}
sError.SetSprintf ( R"("%s" property value should be an array or sting)", sName );
return false;
}
static bool ParseSnippetOptsElastic ( const JsonObj_c & tSnip, CSphString & sQuery, SnippetQuerySettings_t & tQuery, CSphString & sError )
{
JsonObj_c tEncoder = tSnip.GetStrItem ( "encoder", sError, true );
if ( tEncoder )
{
if ( tEncoder.StrVal()=="html" )
tQuery.m_sStripMode = "retain";
}
else if ( !sError.IsEmpty() )
return false;
JsonObj_c tHlQuery = tSnip.GetObjItem ( "highlight_query", sError, true );
if ( tHlQuery )
sQuery = tHlQuery.AsString();
else if ( !sError.IsEmpty() )
return false;
if ( !FetchTags ( "pre_tags", tSnip, tQuery.m_sBeforeMatch, sError ) ) return false;
if ( !FetchTags ( "post_tags", tSnip, tQuery.m_sAfterMatch, sError ) ) return false;
JsonObj_c tNoMatchSize = tSnip.GetItem ( "no_match_size" );
if ( tNoMatchSize )
{
int iNoMatch = 0;
if ( !tSnip.FetchIntItem ( iNoMatch, "no_match_size", sError, true ) )
return false;
tQuery.m_bAllowEmpty = iNoMatch<1;
}
JsonObj_c tOrder = tSnip.GetStrItem ( "order", sError, true );
if ( tOrder )
tQuery.m_bWeightOrder = tOrder.StrVal()=="score";
else if ( !sError.IsEmpty() )
return false;
if ( !ParseSnippetLimitsElastic ( tSnip, tQuery, sError ) )
return false;
return true;
}
static bool ParseSnippetOptsSphinx ( const JsonObj_c & tSnip, SnippetQuerySettings_t & tOpt, CSphString & sError )
{
if ( !ParseSnippetLimitsSphinx ( tSnip, tOpt, sError ) )
return false;
if ( !tSnip.FetchStrItem ( tOpt.m_sBeforeMatch, "before_match", sError, true ) ) return false;
if ( !tSnip.FetchStrItem ( tOpt.m_sAfterMatch, "after_match", sError, true ) ) return false;
if ( !tSnip.FetchIntItem ( tOpt.m_iAround, "around", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bUseBoundaries, "use_boundaries", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bWeightOrder, "weight_order", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bForceAllWords, "force_all_words", sError, true ) ) return false;
if ( !tSnip.FetchStrItem ( tOpt.m_sStripMode, "html_strip_mode", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bAllowEmpty, "allow_empty", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bEmitZones, "emit_zones", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bForcePassages, "force_passages", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bForcePassages, "force_snippets", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bPackFields, "pack_fields", sError, true ) ) return false;
if ( !tSnip.FetchBoolItem ( tOpt.m_bLimitsPerField, "limits_per_field", sError, true ) )return false;
JsonObj_c tBoundary = tSnip.GetStrItem ( "passage_boundary", "snippet_boundary", sError );
if ( tBoundary )
tOpt.m_ePassageSPZ = GetPassageBoundary ( tBoundary.StrVal() );
else if ( !sError.IsEmpty() )
return false;
return true;
}
static bool ParseSnippet ( const JsonObj_c & tSnip, CSphQuery & tQuery, CSphString & sError )
{
CSphString sQuery;
SnippetQuerySettings_t tSettings;
tSettings.m_bJsonQuery = true;
tSettings.m_bPackFields = true;
if ( !ParseSnippetFields ( tSnip, tSettings, sError ) )
return false;
// elastic-style options
if ( !ParseSnippetOptsElastic ( tSnip, sQuery, tSettings, sError ) )
return false;
// sphinx-style options
if ( !ParseSnippetOptsSphinx ( tSnip, tSettings, sError ) )
return false;
FormatSnippetOpts ( sQuery, tSettings, tQuery );
return true;
}
//////////////////////////////////////////////////////////////////////////
// Sort
struct SortField_t : public GeoDistInfo_c
{
CSphString m_sName;
CSphString m_sMode;
bool m_bAsc {true};
};
static void FormatSortBy ( const CSphVector<SortField_t> & dSort, JsonQuery_c & tQuery, bool & bGotWeight )
{
StringBuilder_c sSortBuf;
Comma_c sComma ({", ",2});
for ( const SortField_t &tItem : dSort )
{
const char * sSort = ( tItem.m_bAsc ? " asc" : " desc" );
if ( tItem.IsGeoDist() )
{
// ORDER BY statement
sSortBuf << sComma << g_szOrder << tItem.m_sName << sSort;
// query item
CSphQueryItem & tQueryItem = tQuery.m_dItems.Add();
tQueryItem.m_sExpr = tItem.BuildExprString();
tQueryItem.m_sAlias.SetSprintf ( "%s%s", g_szOrder, tItem.m_sName.cstr() );
// select list
StringBuilder_c sTmp;
sTmp << tQuery.m_sSelect << ", " << tQueryItem.m_sExpr << " as " << tQueryItem.m_sAlias;
sTmp.MoveTo ( tQuery.m_sSelect );
} else if ( tItem.m_sMode.IsEmpty() )
{
const char * sName = tItem.m_sName.cstr();
if ( tItem.m_sName=="_score" )
sName = "@weight";
else if ( tItem.m_sName=="_count" )
sName = "count(*)";
// sort by attribute or weight
sSortBuf << sComma << sName << sSort;
bGotWeight |= ( tItem.m_sName=="_score" );
} else
{
// sort by MVA
// ORDER BY statement
sSortBuf << sComma << g_szOrder << tItem.m_sName << sSort;
// query item
StringBuilder_c sTmp;
sTmp << ( tItem.m_sMode=="min" ? "least" : "greatest" ) << "(" << tItem.m_sName << ")";
CSphQueryItem & tQueryItem = tQuery.m_dItems.Add();
sTmp.MoveTo (tQueryItem.m_sExpr);
tQueryItem.m_sAlias.SetSprintf ( "%s%s", g_szOrder, tItem.m_sName.cstr() );
// select list
sTmp << tQuery.m_sSelect << ", " << tQueryItem.m_sExpr << " as " << tQueryItem.m_sAlias;
sTmp.MoveTo ( tQuery.m_sSelect );
}
tQuery.m_dSortFields.Add ( tItem.m_sName );
}
if ( !dSort.GetLength() )
{
sSortBuf += "@weight desc";
bGotWeight = true;
}
tQuery.m_eSort = SPH_SORT_EXTENDED;
sSortBuf.MoveTo ( tQuery.m_sSortBy );
}
static bool ParseSortObj ( const JsonObj_c & tSortItem, CSphVector<SortField_t> & dSort, CSphString & sError, CSphString & sWarning )
{
bool bSortString = tSortItem.IsStr();
bool bSortObj = tSortItem.IsObj();
CSphString sSortName = tSortItem.Name();
if ( ( !bSortString && !bSortObj ) || !tSortItem.Name() || ( bSortString && !tSortItem.SzVal() ) )
{
sError.SetSprintf ( R"("sort" property 0("%s") should be %s)", sSortName.scstr(), ( bSortObj ? "a string" : "an object" ) );
return false;
}
// [ { "attr_name" : "sort_mode" } ]
if ( bSortString )
{
CSphString sOrder = tSortItem.StrVal();
if ( sOrder!="asc" && sOrder!="desc" )
{
sError.SetSprintf ( R"("sort" property "%s" order is invalid %s)", sSortName.scstr(), sOrder.cstr() );
return false;
}
SortField_t & tAscItem = dSort.Add();
tAscItem.m_sName = sSortName;
tAscItem.m_bAsc = ( sOrder=="asc" );
return true;
}
// [ { "attr_name" : { "order" : "sort_mode" } } ]
SortField_t & tSortField = dSort.Add();
tSortField.m_sName = sSortName;
JsonObj_c tAttrItems = tSortItem.GetItem("order");
if ( tAttrItems )
{
if ( !tAttrItems.IsStr() )
{
sError.SetSprintf ( R"("sort" property "%s" order is invalid)", tAttrItems.Name() );
return false;
}
CSphString sOrder = tAttrItems.StrVal();
tSortField.m_bAsc = ( sOrder=="asc" );
}
JsonObj_c tMode = tSortItem.GetItem("mode");
if ( tMode )
{
if ( !tMode.IsStr() )
{
sError.SetSprintf ( R"("mode" property "%s" is invalid)", tMode.Name() );
return false;
}
CSphString sMode = tMode.StrVal();
if ( sMode!="min" && sMode!="max" )
{
sError.SetSprintf ( R"("mode" supported are "min" and "max", got "%s", not supported)", sMode.cstr() );
return false;
}
tSortField.m_sMode = sMode;
}
// geodist
if ( tSortField.m_sName=="_geo_distance" )
{
if ( tMode )
{
sError = R"("mode" property not supported with "_geo_distance")";
return false;
}
if ( tSortItem.HasItem("unit") )
{
sError = R"("unit" property not supported with "_geo_distance")";
return false;
}
if ( !tSortField.Parse ( tSortItem, false, sError, sWarning ) )
return false;
}
// FIXME!!! "unmapped_type" should be replaced with an EXIST() expression
// unsupported options
const char * dUnsupported[] = { "missing", "nested_path", "nested_filter"};
for ( auto szOption : dUnsupported )
{
if ( tSortItem.HasItem(szOption) )
{
sError.SetSprintf ( R"("%s" property not supported)", szOption );
return false;
}
}
return true;
}
static bool ParseSort ( const JsonObj_c & tSort, JsonQuery_c & tQuery, bool & bGotWeight, CSphString & sError, CSphString & sWarning )
{
bGotWeight = false;
// unsupported options
if ( tSort.HasItem("_script") )
{
sError = "\"_script\" property not supported";
return false;
}
CSphVector<SortField_t> dSort;
dSort.Reserve ( tSort.Size() );
if ( tSort.IsObj() )
{
if ( !ParseSortObj ( tSort[0], dSort, sError, sWarning ) )
return false;
} else
{
for ( const auto & tItem : tSort )
{
CSphString sName = tItem.Name();
bool bString = tItem.IsStr();
bool bObj = tItem.IsObj();
if ( !bString && !bObj )
{
sError.SetSprintf ( R"("sort" property "%s" should be a string or an object)", sName.scstr() );
return false;
}
if ( bObj && tItem.Size()!=1 )
{
sError.SetSprintf ( R"("sort" property "%s" should be an object)", sName.scstr() );
return false;
}
// [ "attr_name" ]
if ( bString )
{
SortField_t & tSortField = dSort.Add();
tSortField.m_sName = tItem.StrVal();
// order defaults to desc when sorting on the _score, and defaults to asc when sorting on anything else
tSortField.m_bAsc = ( tSortField.m_sName!="_score" );
continue;
}
JsonObj_c tSortItem = tItem[0];
if ( !tSortItem )
{
sError = R"(invalid "sort" property item)";
return false;
}
if ( !ParseSortObj ( tSortItem, dSort, sError, sWarning ) )
return false;
}
}
FormatSortBy ( dSort, tQuery, bGotWeight );
return true;
}
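// Illustrative translations performed above (attribute names are made up):
//   [ "price", { "tags": { "order": "desc", "mode": "max" } }, "_score" ]
// sorts by price asc, then by a generated greatest(tags) alias desc, then @weight desc;
// MVA modes add a least()/greatest() expression to the select list.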
//////////////////////////////////////////////////////////////////////////
// _source / select list
static bool ParseStringArray ( const JsonObj_c & tArray, const char * szProp, StrVec_t & dItems, CSphString & sError )
{
for ( const auto & tItem : tArray )
{
if ( !tItem.IsStr() )
{
sError.SetSprintf ( R"("%s" property should be a string)", szProp );
return false;
}
dItems.Add ( tItem.StrVal() );
}
return true;
}
static bool ParseSelect ( const JsonObj_c & tSelect, CSphQuery & tQuery, CSphString & sError )
{
bool bString = tSelect.IsStr();
bool bArray = tSelect.IsArray();
bool bObj = tSelect.IsObj();
if ( !bString && !bArray && !bObj )
{
sError = R"("_source" property should be a string or an array or an object)";
return false;
}
if ( bString )
{
tQuery.m_dIncludeItems.Add ( tSelect.StrVal() );
if ( tQuery.m_dIncludeItems[0]=="*" || tQuery.m_dIncludeItems[0].IsEmpty() )
tQuery.m_dIncludeItems.Reset();
return true;
}
if ( bArray )
return ParseStringArray ( tSelect, R"("_source")", tQuery.m_dIncludeItems, sError );
assert ( bObj );
// includes part of _source object
JsonObj_c tInclude = tSelect.GetArrayItem ( "includes", sError, true );
if ( tInclude )
{
if ( !ParseStringArray ( tInclude, R"("_source" "includes")", tQuery.m_dIncludeItems, sError ) )
return false;
if ( tQuery.m_dIncludeItems.GetLength()==1 && tQuery.m_dIncludeItems[0]=="*" )
tQuery.m_dIncludeItems.Reset();
} else if ( !sError.IsEmpty() )
return false;
// excludes part of _source object
JsonObj_c tExclude = tSelect.GetArrayItem ( "excludes", sError, true );
if ( tExclude )
{
if ( !ParseStringArray ( tExclude, R"("_source" "excludes")", tQuery.m_dExcludeItems, sError ) )
return false;
} else if ( !sError.IsEmpty() )
return false;
return true;
}
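// The three accepted "_source" forms, e.g.:
//   "_source": "title"                                        // single column
//   "_source": [ "title", "price" ]                           // plain list
//   "_source": { "includes": ["t*"], "excludes": ["*_raw"] }  // wildcards allowed
// A sole "*" or "" include resets the list, i.e. selects everything.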
//////////////////////////////////////////////////////////////////////////
// script_fields / expressions
static bool ParseScriptFields ( const JsonObj_c & tExpr, CSphQuery & tQuery, CSphString & sError )
{
if ( !tExpr )
return true;
if ( !tExpr.IsObj() )
{
sError = R"("script_fields" property should be an object)";
return false;
}
StringBuilder_c sSelect;
sSelect << tQuery.m_sSelect;
for ( const auto & tAlias : tExpr )
{
if ( !tAlias.IsObj() )
{
sError = R"("script_fields" properties should be objects)";
return false;
}
if ( CSphString ( tAlias.Name() ).IsEmpty() )
{
sError = R"("script_fields" empty property name)";
return false;
}
JsonObj_c tAliasScript = tAlias.GetItem("script");
if ( !tAliasScript )
{
sError = R"("script_fields" property should have "script" object)";
return false;
}
CSphString sExpr;
if ( !tAliasScript.FetchStrItem ( sExpr, "inline", sError ) )
return false;
const char * dUnsupported[] = { "lang", "params", "stored", "file" };
for ( auto szOption : dUnsupported )
if ( tAliasScript.HasItem(szOption) )
{
sError.SetSprintf ( R"("%s" property not supported in "script_fields")", szOption );
return false;
}
// add to query
CSphQueryItem & tQueryItem = tQuery.m_dItems.Add();
tQueryItem.m_sExpr = sExpr;
tQueryItem.m_sAlias = tAlias.Name();
// add to select list
sSelect.Appendf ( ", %s as %s", tQueryItem.m_sExpr.cstr(), tQueryItem.m_sAlias.cstr() );
}
sSelect.MoveTo ( tQuery.m_sSelect );
return true;
}
static bool ParseExpressions ( const JsonObj_c & tExpr, CSphQuery & tQuery, CSphString & sError )
{
if ( !tExpr )
return true;
if ( !tExpr.IsObj() )
{
sError = R"("expressions" property should be an object)";
return false;
}
StringBuilder_c sSelect;
sSelect << tQuery.m_sSelect;
for ( const auto & tAlias : tExpr )
{
if ( !tAlias.IsStr() )
{
sError = R"("expressions" properties should be strings)";
return false;
}
if ( CSphString ( tAlias.Name() ).IsEmpty() )
{
sError = R"("expressions" empty property name)";
return false;
}
// add to query
CSphQueryItem & tQueryItem = tQuery.m_dItems.Add();
tQueryItem.m_sExpr = tAlias.StrVal();
tQueryItem.m_sAlias = tAlias.Name();
// add to select list
sSelect.Appendf ( ", %s as %s", tQueryItem.m_sExpr.cstr(), tQueryItem.m_sAlias.cstr() );
}
sSelect.MoveTo ( tQuery.m_sSelect );
return true;
}
//////////////////////////////////////////////////////////////////////////
// docvalue_fields
bool ParseDocFields ( const JsonObj_c & tDocFields, JsonQuery_c & tQuery, CSphString & sError )
{
if ( !tDocFields || !tDocFields.IsArray() )
{
sError = R"("docvalue_fields" property should be an array or an object")";
return false;
}
for ( const auto & tItem : tDocFields )
{
if ( !tItem.IsObj() )
{
sError = R"("docvalue_fields" property item should be an object)";
return false;
}
CSphString sFieldName;
if ( !tItem.FetchStrItem ( sFieldName, "field", sError, false ) )
return false;
if ( tQuery.m_dItems.GetFirst ( [&sFieldName] ( const CSphQueryItem & tVal ) { return ( tVal.m_sExpr=="*" || tVal.m_sExpr==sFieldName ); } )==-1 )
{
CSphQueryItem & tDFItem = tQuery.m_dItems.Add();
tDFItem.m_sExpr = sFieldName;
tDFItem.m_sAlias = sFieldName;
}
// FIXME!!! collect format type
bool bDateTime = false;
CSphString sFormat;
if ( tItem.FetchStrItem ( sFormat, "format", sError, true ) )
bDateTime = ( sFormat=="date_time" );
tQuery.m_dDocFields.Add ( { sFieldName, bDateTime } );
}
return true;
}
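// An illustrative "docvalue_fields" entry; "format":"date_time" flags the field
// for datetime rendering in the ES-compatible result set:
//   "docvalue_fields": [ { "field": "created_at", "format": "date_time" } ]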
static Aggr_e GetAggrFunc ( const JsonObj_c & tBucket, bool bCheckAggType )
{
if ( StrEq ( tBucket.Name(), "significant_terms" ) )
return Aggr_e::SIGNIFICANT;
if ( StrEq ( tBucket.Name(), "histogram" ) )
return Aggr_e::HISTOGRAM;
if ( StrEq ( tBucket.Name(), "date_histogram" ) )
return Aggr_e::DATE_HISTOGRAM;
if ( StrEq ( tBucket.Name(), "range") )
return Aggr_e::RANGE;
if ( StrEq ( tBucket.Name(), "date_range") )
return Aggr_e::DATE_RANGE;
if ( StrEq ( tBucket.Name(), "composite") )
return Aggr_e::COMPOSITE;
if ( StrEq ( tBucket.Name(), "min") )
return Aggr_e::MIN;
if ( StrEq ( tBucket.Name(), "max") )
return Aggr_e::MAX;
if ( StrEq ( tBucket.Name(), "sum") )
return Aggr_e::SUM;
if ( StrEq ( tBucket.Name(), "avg") )
return Aggr_e::AVG;
if ( bCheckAggType )
sphWarning ( "unsupported aggregate type '%s'", tBucket.Name() );
return Aggr_e::NONE;
}
static void SetRangeFrom ( const JsonObj_c & tSrc, bool bForceFloat, RangeSetting_t & tItem )
{
if ( tSrc.IsDbl() )
tItem.m_fFrom = tSrc.DblVal();
else if ( bForceFloat )
tItem.m_fFrom = tSrc.IntVal();
else
tItem.m_iFrom = tSrc.IntVal();
}
static void SetRangeTo ( const JsonObj_c & tSrc, bool bForceFloat, RangeSetting_t & tItem )
{
if ( tSrc.IsDbl() )
tItem.m_fTo = tSrc.DblVal();
else if ( bForceFloat )
tItem.m_fTo = tSrc.IntVal();
else
tItem.m_iTo = tSrc.IntVal();
}
static bool GetKeyed ( const JsonObj_c & tBucket, bool & bKeyed, CSphString & sError )
{
if ( !tBucket.HasItem ( "keyed" ) )
return true;
const auto tKeyed = tBucket.GetBoolItem ( "keyed", sError, false );
if ( !tKeyed )
return false;
bKeyed = tKeyed.BoolVal();
return true;
}
static bool ParseAggrRange ( const JsonObj_c & tRanges, const CSphString & sCol, AggrRangeSetting_t & dRanges, CSphString & sError );
static bool ParseAggrRange ( const JsonObj_c & tRanges, const CSphString & sCol, AggrDateRangeSetting_t & dRanges, CSphString & sError );
static bool ParseAggrRange ( const JsonObj_c & tBucket, JsonAggr_t & tItem, bool bDate, CSphString & sError )
{
JsonObj_c tRanges = tBucket.GetItem( "ranges" );
if ( !tRanges || !tRanges.IsArray() )
{
if ( !tRanges )
sError.SetSprintf ( "\"%s\" missed \"ranges\" property", tItem.m_sCol.cstr() );
else
sError.SetSprintf ( "\"%s\" \"ranges\" should be an array", tItem.m_sCol.cstr() );
return false;
}
int iCount = tRanges.Size();
if ( !iCount )
{
sError.SetSprintf ( "\"%s\" empty \"ranges\" property", tItem.m_sCol.cstr() );
return false;
}
bool bKeyed = false;
if ( !GetKeyed ( tBucket, bKeyed, sError ) )
return false;
if ( !bDate )
{
auto & dRanges = tItem.m_tRange;
dRanges.Resize ( iCount );
dRanges.m_bKeyed = bKeyed;
return ParseAggrRange ( tRanges, tItem.m_sCol, dRanges, sError );
} else
{
auto & dRanges = tItem.m_tDateRange;
dRanges.Resize ( iCount );
dRanges.m_bKeyed = bKeyed;
return ParseAggrRange ( tRanges, tItem.m_sCol, dRanges, sError );
}
}
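// For reference, an illustrative "ranges" payload handled by the dispatcher
// above (bounds are examples only):
//
//   "ranges": [ { "to": 10 }, { "from": 10, "to": 20 }, { "from": 20 } ]
//
// only the first item may omit "from" (open left) and only the last may omit
// "to" (open right); any other gap is an error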
bool ParseAggrRange ( const JsonObj_c & tRanges, const CSphString & sCol, AggrRangeSetting_t & dRanges, CSphString & sError )
{
int iFloatStart = -1;
for ( int i=0; i<dRanges.GetLength(); i++ )
{
const auto tRangeItem = tRanges[i];
const auto tFrom = tRangeItem.GetItem ( "from" );
const auto tTo = tRangeItem.GetItem ( "to" );
const bool bHasFrom = tFrom;
const bool bHasTo = tTo;
if ( !bHasFrom && i!=0 )
{
sError.SetSprintf ( "\"%s\" ranges[%d] \"from\" empty", sCol.cstr(), i );
return false;
}
if ( !bHasTo && i!=dRanges.GetLength()-1 )
{
sError.SetSprintf ( "\"%s\" ranges[%d] \"to\" empty", sCol.cstr(), i );
return false;
}
if ( ( bHasFrom && tFrom.IsDbl() ) || ( bHasTo && tTo.IsDbl() ) )
{
dRanges.m_bFloat = true;
if ( iFloatStart==-1 )
iFloatStart = i;
}
if ( bHasFrom )
SetRangeFrom ( tFrom, ( iFloatStart!=-1 ), dRanges[i] );
else
dRanges.m_bOpenLeft = true;
if ( bHasTo )
SetRangeTo ( tTo, ( iFloatStart!=-1 ), dRanges[i] );
else
dRanges.m_bOpenRight = true;
}
// convert int to float values for head of array values
if ( iFloatStart>0 )
{
for ( int i=0; i<iFloatStart; i++ )
{
dRanges[i].m_fFrom = dRanges[i].m_iFrom;
dRanges[i].m_fTo = dRanges[i].m_iTo;
}
}
if ( dRanges.m_bOpenLeft )
{
if ( dRanges.m_bFloat )
dRanges[0].m_fFrom = -FLT_MAX;
else
dRanges[0].m_iFrom = -LLONG_MAX;
}
if ( dRanges.m_bOpenRight )
{
if ( dRanges.m_bFloat )
dRanges.Last().m_fTo = FLT_MAX;
else
dRanges.Last().m_iTo = LLONG_MAX;
}
return true;
}
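// Note on the numeric version above: once any "from"/"to" is a double the
// whole setting turns float, and bounds parsed earlier as ints get converted,
// e.g. in [ {"to":10}, {"from":10,"to":19.5} ] the first "to" ends up as 10.0f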
bool ParseAggrRange ( const JsonObj_c & tRanges, const CSphString & sCol, AggrDateRangeSetting_t & dRanges, CSphString & sError )
{
for ( int i=0; i<dRanges.GetLength(); i++ )
{
const auto tRangeItem = tRanges[i];
const auto tFrom = tRangeItem.GetItem ( "from" );
const auto tTo = tRangeItem.GetItem ( "to" );
const bool bHasFrom = tFrom;
const bool bHasTo = tTo;
if ( !bHasFrom && i!=0 )
{
sError.SetSprintf ( "\"%s\" ranges[%d] \"from\" empty", sCol.cstr(), i );
return false;
}
if ( !bHasTo && i!=dRanges.GetLength()-1 )
{
sError.SetSprintf ( "\"%s\" ranges[%d] \"to\" empty", sCol.cstr(), i );
return false;
}
if ( bHasFrom )
dRanges[i].m_sFrom = tFrom.StrVal();
if ( bHasTo )
dRanges[i].m_sTo = tTo.StrVal();
}
return true;
}
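// For reference, an illustrative "date_range" item for the date version above
// (dates are examples; the accepted date syntax is validated downstream, not here):
//
//   "ranges": [ { "from": "2023-01-01", "to": "2024-01-01" } ]
//
// "from"/"to" are stored as raw strings at this point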
static bool ParseAggrHistogram ( const JsonObj_c & tBucket, JsonAggr_t & tItem, CSphString & sError )
{
AggrHistSetting_t & tHist = tItem.m_tHist;
JsonObj_c tInterval = tBucket.GetItem ( "interval" );
if ( tInterval.Empty() )
{
sError.SetSprintf ( "\"%s\" interval missed", tItem.m_sCol.cstr() );
return false;
}
if ( !tInterval.IsNum() )
{
sError.SetSprintf ( "\"%s\" interval should be numeric", tItem.m_sCol.cstr() );
return false;
}
if ( tInterval.IsInt() )
tHist.m_tInterval = tInterval.IntVal();
else
tHist.m_tInterval = tInterval.FltVal();
JsonObj_c tOffset = tBucket.GetItem ( "offset" );
if ( !tOffset.Empty() )
{
if ( !tOffset.IsNum() )
{
sError.SetSprintf ( "\"%s\" offset should be numeric", tItem.m_sCol.cstr() );
return false;
}
if ( tOffset.IsInt() )
tHist.m_tOffset = tOffset.IntVal();
else
tHist.m_tOffset = tOffset.FltVal();
} else
{
tHist.m_tOffset = INT64_C ( 0 );
}
if ( !GetKeyed ( tBucket, tHist.m_bKeyed, sError ) )
return false;
FixFloat ( tHist );
return true;
}
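// For reference, an illustrative "histogram" bucket (values are examples):
//
//   "histogram": { "field": "price", "interval": 50, "offset": 10, "keyed": true }
//
// "interval" is mandatory and may be int or float; "offset" defaults to 0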
static bool ParseAggrDateHistogram ( const JsonObj_c & tBucket, JsonAggr_t & tItem, CSphString & sError )
{
AggrDateHistSetting_t & tHist = tItem.m_tDateHist;
JsonObj_c tInterval = tBucket.GetItem ( "calendar_interval" );
if ( tInterval.Empty() )
{
sError.SetSprintf ( "\"%s\" calendar_interval missed", tItem.m_sCol.cstr() );
return false;
}
if ( !tInterval.IsStr() )
{
sError.SetSprintf ( "\"%s\" calendar_interval should be string", tItem.m_sCol.cstr() );
return false;
}
tHist.m_sInterval = tInterval.StrVal();
if ( !GetKeyed ( tBucket, tHist.m_bKeyed, sError ) )
return false;
return true;
}
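// For reference, an illustrative "date_histogram" bucket ("month" is an
// example value; the supported interval names are checked elsewhere):
//
//   "date_histogram": { "field": "created_at", "calendar_interval": "month" }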
static bool ParseAggrComposite ( const JsonObj_c & tBucket, JsonAggr_t & tAggr, CSphString & sError )
{
JsonObj_c tComposite = tBucket.GetObjItem ( "composite", sError, false );
if ( !tComposite )
return false;
JsonObj_c tSource = tComposite.GetArrayItem ( "sources", sError, false );
if ( !tSource )
return false;
if ( !tSource.IsArray() )
{
sError = R"("sources" property item should be an array)";
return false;
}
SmallStringHash_T<AggrComposite_t> hColumns;
for ( const auto & tArrayItem : tSource )
{
if ( !tArrayItem.IsObj() )
{
sError = R"("sources" items should be an object)";
return false;
}
JsonObj_c tItem = tArrayItem.begin();
JsonObj_c tTerms = tItem.GetObjItem ( "terms", sError, false );
if ( !tTerms )
return false;
AggrComposite_t tCol;
if ( !tTerms.FetchStrItem ( tCol.m_sColumn, "field", sError, false ) )
return false;
tCol.m_sAlias = tItem.Name();
if ( !hColumns.Add ( tCol, tItem.Name() ) )
{
sError.SetSprintf ( R"("composite" has multiple "%s" aggregates)", tItem.Name() );
return false;
}
}
if ( hColumns.IsEmpty() )
{
sError = R"(empty "composite" aggregate)";
return false;
}
JsonObj_c tAfter = tComposite.GetObjItem ( "after", sError, false );
if ( tAfter && tAfter.Size() )
{
JsonObj_c tJsonQuery ( R"( {"query":{"bool":{"must":[] }}} )" );
JsonObj_c tFilters = tJsonQuery.GetItem ( "query" ).GetItem ( "bool" ).GetItem ( "must" );
for ( const auto & tItem : tAfter )
{
AggrComposite_t * pCol = hColumns ( tItem.Name() );
if ( !pCol )
{
sError.SetSprintf ( R"("after" missed "%s" aggregate)", tItem.Name() );
return false;
}
JsonObj_c tFilterVal = tItem.Clone();
JsonObj_c tEqItem ( R"( {"equals":{} } )") ;
tEqItem.begin().AddItem ( pCol->m_sColumn.cstr(), tFilterVal );
tFilters.AddItem ( tEqItem );
}
CSphQuery tTmpQuery;
if ( !ParseJsonQueryFilters ( tJsonQuery.GetItem( "query" ), tTmpQuery, sError, sError ) )
return false;
if ( !sError.IsEmpty() )
return false;
assert ( tTmpQuery.m_dFilterTree.IsEmpty() );
tAggr.m_dCompositeAfterKey = std::move ( tTmpQuery.m_dFilters );
}
tAggr.m_iSize = DEFAULT_MAX_MATCHES;
tComposite.FetchIntItem ( tAggr.m_iSize, "size", sError, true );
StringBuilder_c sColName ( "," );
tAggr.m_dComposite.Reserve ( hColumns.GetLength() );
for ( const auto & tCol : hColumns )
{
sColName += tCol.second.m_sColumn.cstr();
tAggr.m_dComposite.Add ( tCol.second );
}
tAggr.m_sCol = sColName.cstr();
return true;
}
static bool ParseAggsNode ( const JsonObj_c & tBucket, const JsonObj_c & tJsonItem, bool bRoot, JsonAggr_t & tItem, CSphString & sError )
{
if ( !tBucket.IsObj() )
{
sError.SetSprintf ( R"("aggs" bucket '%s' should be an object)", tItem.m_sBucketName.cstr() );
return false;
}
if ( !StrEq ( tBucket.Name(), "composite" ) && !tBucket.FetchStrItem ( tItem.m_sCol, "field", sError, false ) )
return false;
tBucket.FetchIntItem ( tItem.m_iSize, "size", sError, true );
int iShardSize = 0;
tBucket.FetchIntItem ( iShardSize, "shard_size", sError, true );
tItem.m_iSize = Max ( tItem.m_iSize, iShardSize ); // FIXME!!! use (size * 1.5 + 10) for shard size
tItem.m_eAggrFunc = GetAggrFunc ( tBucket, !bRoot );
switch ( tItem.m_eAggrFunc )
{
case Aggr_e::DATE_HISTOGRAM:
if ( !ParseAggrDateHistogram ( tBucket, tItem, sError ) )
return false;
tItem.m_iSize = Max ( tItem.m_iSize, 1000 ); // set max_matches to (max-min) / interval
break;
case Aggr_e::HISTOGRAM:
if ( !ParseAggrHistogram ( tBucket, tItem, sError ) )
return false;
tItem.m_iSize = Max ( tItem.m_iSize, 1000 ); // set max_matches to (max-min) / interval
break;
case Aggr_e::RANGE:
if ( !ParseAggrRange ( tBucket, tItem, false, sError ) )
return false;
tItem.m_iSize = Max ( tItem.m_iSize, tItem.m_tRange.GetLength() + 1 ); // set max_matches to buckets count + _all bucket
break;
case Aggr_e::DATE_RANGE:
if ( !ParseAggrRange ( tBucket, tItem, true, sError ) )
return false;
tItem.m_iSize = Max ( tItem.m_iSize, tItem.m_tDateRange.GetLength() + 1 ); // set max_matches to buckets count + _all bucket
break;
case Aggr_e::COMPOSITE:
if ( !ParseAggrComposite ( tJsonItem, tItem, sError ) )
return false;
break;
case Aggr_e::MIN:
case Aggr_e::MAX:
case Aggr_e::SUM:
case Aggr_e::AVG:
tItem.m_iSize = 1;
break;
default: break;
}
return true;
}
static bool ParseAggsNodeSort ( const JsonObj_c & tJsonItem, bool bOrder, JsonAggr_t & tItem, CSphString & sError )
{
if ( !( tJsonItem.IsArray() || tJsonItem.IsObj() ) )
{
sError.SetSprintf ( "\"%s\" property value should be an array or an object", ( bOrder ? "order" : "sort" ) );
return false;
}
bool bGotWeight = false;
JsonQuery_c tTmpQuery;
tTmpQuery.m_sSortBy = "";
tTmpQuery.m_eSort = SPH_SORT_RELEVANCE;
// FIXME!!! reports warnings for geodist sort
CSphString sWarning;
if ( !ParseSort ( tJsonItem, tTmpQuery, bGotWeight, sError, sWarning ) )
return false;
tItem.m_sSort = tTmpQuery.m_sSortBy;
return true;
}
static bool AddSubAggregate ( const JsonObj_c & tAggs, bool bRoot, CSphVector<JsonAggr_t> & dParentItems, CSphString & sError )
{
if ( bRoot && tAggs.begin().Empty() )
{
JsonAggr_t & tCount = dParentItems.Add();
tCount.m_eAggrFunc = Aggr_e::COUNT;
tCount.m_iSize = 1;
return true;
}
for ( const auto & tJsonItem : tAggs )
{
if ( !tJsonItem.IsObj() )
{
sError = R"("aggs" property item should be an object)";
return false;
}
JsonAggr_t tItem;
tItem.m_sBucketName = tJsonItem.Name();
for ( const auto & tAggsItem : tJsonItem )
{
// could be a sort object on the aggs item or an order object on the bucket
if ( strcmp ( tAggsItem.Name(), "sort" )==0 )
{
if ( !ParseAggsNodeSort ( tAggsItem, false, tItem, sError ) )
return false;
} else
{
if ( StrEq ( tAggsItem.Name(), "aggs" ) || tAggsItem.HasItem ( "aggs" ) )
{
sError = R"(nested "aggs" is not supported)";
return false;
}
if ( tAggsItem==tAggsItem.end() )
{
sError.SetSprintf ( R"("aggs" bucket '%s' with only nested items)", tAggsItem.Name() );
return false;
}
if ( !ParseAggsNode ( tAggsItem, tJsonItem, bRoot, tItem, sError ) )
return false;
// bucket could have its own order item
if ( tAggsItem.HasItem ( "order" ) )
{
if ( !ParseAggsNodeSort ( tAggsItem.GetItem("order"), true, tItem, sError ) )
return false;
}
}
}
if ( tItem.m_eAggrFunc==Aggr_e::NONE && !bRoot )
{
sError.SetSprintf ( R"(bucket '%s' without aggregate items)", tItem.m_sBucketName.cstr() );
return false;
}
dParentItems.Add ( tItem );
}
return true;
}
bool ParseAggregates ( const JsonObj_c & tAggs, JsonQuery_c & tQuery, CSphString & sError )
{
if ( !tAggs || !tAggs.IsObj() )
{
sError = R"("aggs" property should be an object")";
return false;
}
if ( !AddSubAggregate ( tAggs, true, tQuery.m_dAggs, sError ) )
return false;
// set the query "now" timestamp once so that all date aggregates share the same reference time
if ( tQuery.m_dAggs.any_of ( [] ( const JsonAggr_t & tAggr ) { return !tAggr.m_tDateRange.IsEmpty(); } ) )
tQuery.m_iNow = time ( nullptr );
return true;
}
CSphString JsonAggr_t::GetAliasName () const
{
CSphString sName;
sName.SetSprintf ( "%s_%s", m_sCol.cstr(), m_sBucketName.cstr() );
return sName;
}
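// e.g. column "price" in bucket "price_ranges" yields "price_price_ranges"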
size: 111,426 | language: C++ | extension: .cpp | total_lines: 3,321 | avg_line_length: 30.682626 | max_line_length: 314 | alphanum_fraction: 0.676582 | repo_name: manticoresoftware/manticoresearch | repo_stars: 8,893 | repo_forks: 493 | repo_open_issues: 500 | repo_license: GPL-3.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | exact_duplicates_redpajama: false | near_duplicates_redpajama: false | exact_duplicates_githubcode: false | exact_duplicates_stackv2: false | exact_duplicates_stackv1: false | near_duplicates_githubcode: false | near_duplicates_stackv1: false | near_duplicates_stackv2: false

id: 16,883 | file_name: sphinx.cpp | file_path: manticoresoftware_manticoresearch/src/sphinx.cpp
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinx.h"
#include "sphinxstem.h"
#include "sphinxquery.h"
#include "sphinxutils.h"
#include "sphinxsort.h"
#include "fileutils.h"
#include "sphinxexpr.h"
#include "sphinxfilter.h"
#include "sphinxint.h"
#include "sphinxsearch.h"
#include "searchnode.h"
#include "sphinxjson.h"
#include "sphinxplugin.h"
#include "sphinxqcache.h"
#include "icu.h"
#include "jieba.h"
#include "attribute.h"
#include "secondaryindex.h"
#include "docidlookup.h"
#include "histogram.h"
#include "killlist.h"
#include "docstore.h"
#include "global_idf.h"
#include "indexformat.h"
#include "indexcheck.h"
#include "coroutine.h"
#include "columnarlib.h"
#include "columnarmisc.h"
#include "columnarfilter.h"
#include "mini_timer.h"
#include "sphinx_alter.h"
#include "conversion.h"
#include "binlog.h"
#include "task_info.h"
#include "client_task_info.h"
#include "chunksearchctx.h"
#include "std/lrucache.h"
#include "std/sys.h"
#include "indexfiles.h"
#include "task_dispatcher.h"
#include "secondarylib.h"
#include "knnlib.h"
#include "attrindex_merge.h"
#include "knnmisc.h"
#include "querycontext.h"
#include "dict/infix/infix_builder.h"
#include "skip_cache.h"
#include "jsonsi.h"
#include "tracer.h"
#include <errno.h>
#include <ctype.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/stat.h>
#include <time.h>
#include <math.h>
#include <algorithm>
#if WITH_RE2
#include <string>
#include <re2/re2.h>
#endif
#if !_WIN32
#include <unistd.h>
#include <sys/time.h>
#endif
/////////////////////////////////////////////////////////////////////////////
// logf() is not there sometimes (eg. Solaris 9)
#if !_WIN32 && !HAVE_LOGF
static inline float logf ( float v )
{
return (float) log ( v );
}
#endif
#if _WIN32
void localtime_r ( const time_t * clock, struct tm * res )
{
tm * pRes = localtime ( clock );
if ( pRes )
*res = *pRes;
}
void gmtime_r ( const time_t * clock, struct tm * res )
{
tm * pRes = gmtime ( clock );
if ( pRes )
*res = *pRes;
}
#endif
#include <boost/preprocessor/repetition/repeat.hpp>
#include "attrindex_builder.h"
#include "stripper/html_stripper.h"
#include "queryfilter.h"
#include "indexing_sources/source_document.h"
#include "indexing_sources/source_stats.h"
#include "dict/dict_base.h"
#include "dict/bin.h"
/////////////////////////////////////////////////////////////////////////////
// GLOBALS
/////////////////////////////////////////////////////////////////////////////
const char * MAGIC_WORD_SENTENCE = "\3sentence"; // emitted from source on sentence boundary, stored in dictionary
const char * MAGIC_WORD_PARAGRAPH = "\3paragraph"; // emitted from source on paragraph boundary, stored in dictionary
bool g_bJsonStrict = false;
bool g_bJsonAutoconvNumbers = false;
bool g_bJsonKeynamesToLowercase = false;
static const int MIN_READ_BUFFER = 8192;
static const int MIN_READ_UNHINTED = 1024;
static int g_iReadUnhinted = DEFAULT_READ_UNHINTED;
static bool g_bPseudoSharding = true;
static int g_iPseudoShardingThresh = 8192;
static BuildBufferSettings_t g_tMergeSettings;
static int g_iLowPriorityDivisor = 10; // how much smaller a time quantum low-priority tasks get compared to normal ones under load
static bool LOG_LEVEL_SPLIT_QUERY = val_from_env ( "MANTICORE_LOG_SPLIT_QUERY", false ); // verbose logging of split-query events, controlled by this env variable
#define LOG_COMPONENT_QUERYINFO __LINE__ << " "
#define QUERYINFO LOGINFO ( SPLIT_QUERY, QUERYINFO )
// quick hack for indexer crash reporting
// one day, these might turn into a callback or something
int64_t g_iIndexerCurrentDocID = 0;
int64_t g_iIndexerCurrentHits = 0;
int64_t g_iIndexerCurrentRangeMin = 0;
int64_t g_iIndexerCurrentRangeMax = 0;
int64_t g_iIndexerPoolStartDocID = 0;
int64_t g_iIndexerPoolStartHit = 0;
static bool IndexBuildDone ( const BuildHeader_t & tBuildHeader, const WriteHeader_t & tWriteHeader, const CSphString & sFileName, CSphString & sError );
/////////////////////////////////////////////////////////////////////////////
// COMPILE-TIME CHECKS
/////////////////////////////////////////////////////////////////////////////
STATIC_SIZE_ASSERT ( SphOffset_t, 8 );
/////////////////////////////////////////////////////////////////////////////
// INTERNAL SPHINX CLASSES DECLARATIONS
/////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
static const char * g_dRankerNames[] =
{
"proximity_bm25",
"bm25",
"none",
"wordcount",
"proximity",
"matchany",
"fieldmask",
"sph04",
"expr",
"export",
NULL
};
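// keep this table in sync with the ESphRankMode enum: sphGetRankerName()
// below indexes it directly by ranker id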
const char * sphGetRankerName ( ESphRankMode eRanker )
{
if ( eRanker<SPH_RANK_PROXIMITY_BM25 || eRanker>=SPH_RANK_TOTAL )
return NULL;
return g_dRankerNames[eRanker];
}
/////////////////////////////////////////////////////////////////////
/// everything required to setup search term
class DiskIndexQwordSetup_c final : public ISphQwordSetup
{
public:
DiskIndexQwordSetup_c ( DataReaderFactoryPtr_c pDoclist, DataReaderFactoryPtr_c pHitlist, const BYTE * pSkips, int iSkiplistBlockSize, bool bSetupReaders, RowID_t iRowsCount )
: m_pDoclist ( std::move ( pDoclist ) )
, m_pHitlist ( std::move ( pHitlist ) )
, m_pSkips ( pSkips )
, m_iSkiplistBlockSize ( iSkiplistBlockSize )
, m_bSetupReaders ( bSetupReaders )
, m_iRowsCount ( iRowsCount )
{}
ISphQword * QwordSpawn ( const XQKeyword_t & tWord ) const final;
bool QwordSetup ( ISphQword * ) const final;
bool Setup ( ISphQword * ) const;
ISphQword * ScanSpawn() const final;
private:
DataReaderFactoryPtr_c m_pDoclist;
DataReaderFactoryPtr_c m_pHitlist;
const BYTE * m_pSkips;
int m_iSkiplistBlockSize = 0;
bool m_bSetupReaders = false;
RowID_t m_iRowsCount = INVALID_ROWID;
private:
bool SetupWithWrd ( const DiskIndexQwordTraits_c& tWord, DictEntry_t& tRes ) const;
bool SetupWithCrc ( const DiskIndexQwordTraits_c& tWord, DictEntry_t& tRes ) const;
};
/// query word from the searcher's point of view
template < bool INLINE_HITS, bool DISABLE_HITLIST_SEEK >
class DiskIndexQword_c : public DiskIndexQwordTraits_c
{
public:
DiskIndexQword_c ( bool bUseMinibuffer, bool bExcluded, int64_t iIndexId )
: DiskIndexQwordTraits_c ( bUseMinibuffer, bExcluded )
, m_iIndexId ( iIndexId )
{}
~DiskIndexQword_c()
{
if ( m_bSkipFromCache )
{
SkipCache::Release ( { m_iIndexId, m_uWordID } );
m_pSkipData = nullptr;
m_bSkipFromCache = false;
}
}
void Reset () final
{
if ( m_rdDoclist )
m_rdDoclist->Reset ();
if ( m_rdHitlist )
m_rdHitlist->Reset ();
ResetDecoderState();
}
void GetHitlistEntry ()
{
assert ( !m_bHitlistOver );
DWORD iDelta = m_rdHitlist->UnzipInt ();
if ( iDelta )
{
m_iHitPos += iDelta;
} else
{
m_iHitPos = EMPTY_HIT;
#ifndef NDEBUG
m_bHitlistOver = true;
#endif
}
}
RowID_t AdvanceTo ( RowID_t tRowID ) final
{
if ( m_tDoc.m_tRowID!=INVALID_ROWID && tRowID<=m_tDoc.m_tRowID )
return m_tDoc.m_tRowID;
bool bRewound = HintRowID (tRowID);
if ( bRewound || m_tDoc.m_tRowID==INVALID_ROWID )
ReadNext();
while ( m_tDoc.m_tRowID < tRowID )
ReadNext();
return m_tDoc.m_tRowID;
}
bool HintRowID ( RowID_t tRowID ) final
{
// tricky bit
// FindSpan() will match a block where tBaseRowIDPlus1[i] <= tRowID < tBaseRowIDPlus1[i+1]
// meaning that the subsequent ids decoded will be strictly > RefValue
// meaning that if previous (!) block ends with tRowID exactly,
// and we use tRowID itself as RefValue, that document gets lost!
// first check if we're still inside the last block
if ( m_iSkipListBlock==-1 )
{
if ( !m_pSkipData )
return true;
m_iSkipListBlock = FindSpan ( m_pSkipData->m_dSkiplist, tRowID );
if ( m_iSkipListBlock<0 )
return false;
}
else
{
assert(m_pSkipData);
const auto & dSkiplist = m_pSkipData->m_dSkiplist;
if ( m_iSkipListBlock < dSkiplist.GetLength()-1 )
{
int iNextBlock = m_iSkipListBlock+1;
RowID_t tNextBlockRowID = dSkiplist[iNextBlock].m_tBaseRowIDPlus1;
if ( tRowID>=tNextBlockRowID )
{
auto dSkips = VecTraits_T<SkiplistEntry_t> ( &dSkiplist[iNextBlock], dSkiplist.GetLength()-iNextBlock );
m_iSkipListBlock = FindSpan ( dSkips, tRowID );
if ( m_iSkipListBlock<0 )
return false;
m_iSkipListBlock += iNextBlock;
}
}
else // we're already at our last block, no need to search
return false;
}
assert(m_pSkipData);
const SkiplistEntry_t & t = m_pSkipData->m_dSkiplist[m_iSkipListBlock];
if ( t.m_iOffset<=m_rdDoclist->GetPos() )
return false;
m_rdDoclist->SeekTo ( t.m_iOffset, -1 );
m_tDoc.m_tRowID = t.m_tBaseRowIDPlus1-1;
m_uHitPosition = m_iHitlistPos = t.m_iBaseHitlistPos;
return true;
}
const CSphMatch & GetNextDoc() override
{
ReadNext();
return m_tDoc;
}
void SeekHitlist ( SphOffset_t uOff ) final
{
if ( uOff >> 63 )
{
m_uHitState = 1;
m_uInlinedHit = (DWORD)uOff; // truncate high dword
} else
{
m_uHitState = 0;
m_iHitPos = EMPTY_HIT;
if constexpr ( DISABLE_HITLIST_SEEK )
assert ( m_rdHitlist->GetPos()==uOff ); // make sure we're where caller thinks we are.
else
m_rdHitlist->SeekTo ( uOff, READ_NO_SIZE_HINT );
}
#ifndef NDEBUG
m_bHitlistOver = false;
#endif
}
Hitpos_t GetNextHit () final
{
assert ( m_bHasHitlist );
switch ( m_uHitState )
{
case 0: // read hit from hitlist
GetHitlistEntry ();
return m_iHitPos;
case 1: // return inlined hit
m_uHitState = 2;
return m_uInlinedHit;
case 2: // return end-of-hitlist marker after inlined hit
#ifndef NDEBUG
m_bHitlistOver = true;
#endif
m_uHitState = 0;
return EMPTY_HIT;
}
sphDie ( "INTERNAL ERROR: impossible hit emitter state" );
return EMPTY_HIT;
}
bool Setup ( const DiskIndexQwordSetup_c * pSetup ) override
{
return pSetup->Setup ( this );
}
using is_worddict = std::integral_constant<bool, !DISABLE_HITLIST_SEEK>;
private:
int m_iSkipListBlock = -1;
inline void ReadNext()
{
RowID_t uDelta = m_rdDoclist->UnzipRowid();
if ( uDelta )
{
m_bAllFieldsKnown = false;
m_tDoc.m_tRowID += uDelta;
if_const ( INLINE_HITS )
{
m_uMatchHits = m_rdDoclist->UnzipInt();
const DWORD uFirst = m_rdDoclist->UnzipInt();
if ( m_uMatchHits==1 && m_bHasHitlist )
{
DWORD uField = m_rdDoclist->UnzipInt(); // field and end marker
m_iHitlistPos = uFirst | ( uField << 23 ) | ( U64C(1)<<63 );
m_dQwordFields.UnsetAll();
// make sure bad field data does not cause a crash
m_dQwordFields.Set ( ( uField >> 1 ) & ( (DWORD)SPH_MAX_FIELDS-1 ) );
m_bAllFieldsKnown = true;
} else
{
m_dQwordFields.Assign32 ( uFirst );
m_uHitPosition += m_rdDoclist->UnzipOffset();
m_iHitlistPos = m_uHitPosition;
}
} else
{
SphOffset_t iDeltaPos = m_rdDoclist->UnzipOffset();
assert ( iDeltaPos>=0 );
m_iHitlistPos += iDeltaPos;
m_dQwordFields.Assign32 ( m_rdDoclist->UnzipInt() );
m_uMatchHits = m_rdDoclist->UnzipInt();
}
} else
m_tDoc.m_tRowID = INVALID_ROWID;
}
private:
int64_t m_iIndexId = 0;
};
DiskIndexQwordTraits_c * sphCreateDiskIndexQword ( bool bInlineHits )
{
if ( bInlineHits )
return new DiskIndexQword_c<true,false> ( false, false, 0 );
return new DiskIndexQword_c<false,false> ( false, false, 0 );
}
/////////////////////////////////////////////////////////////////////////////
#define WITH_QWORD(INDEX, NO_SEEK, NAME, ACTION) \
do if ( (( const CSphIndex_VLN *)INDEX)->m_tSettings.m_eHitFormat==SPH_HIT_FORMAT_INLINE ) \
{ using NAME = DiskIndexQword_c < true, NO_SEEK >; ACTION; } \
else \
{ using NAME = DiskIndexQword_c < false, NO_SEEK >; ACTION; } \
while(0)
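// A usage sketch (Func is a hypothetical template, shown for illustration):
// the macro resolves the qword class matching the on-disk hit format and
// runs ACTION with that class bound to NAME, e.g.
//
//   WITH_QWORD ( pIndex, false, Qword, return Func<Qword> ( tArgs ) );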
/////////////////////////////////////////////////////////////////////////////
#define HITLESS_DOC_MASK 0x7FFFFFFF
#define HITLESS_DOC_FLAG 0x80000000
// duplicated in sphinxformat.cpp
struct Slice64_t
{
uint64_t m_uOff;
int m_iLen;
};
// duplicated in sphinxformat.cpp
struct DiskSubstringPayload_t : public ISphSubstringPayload
{
explicit DiskSubstringPayload_t ( int iDoclists )
: m_dDoclist ( iDoclists )
{}
CSphFixedVector<Slice64_t> m_dDoclist;
};
template < bool INLINE_HITS >
class DiskPayloadQword_c : public DiskIndexQword_c<INLINE_HITS, false>
{
typedef DiskIndexQword_c<INLINE_HITS, false> BASE;
public:
DiskPayloadQword_c ( const DiskSubstringPayload_t * pPayload, bool bExcluded, DataReaderFactory_c * pDoclist, DataReaderFactory_c * pHitlist, int64_t iIndexId )
: BASE ( true, bExcluded, iIndexId )
{
m_pPayload = pPayload;
this->m_iDocs = m_pPayload->m_iTotalDocs;
this->m_iHits = m_pPayload->m_iTotalHits;
m_iDoclist = 0;
this->SetDocReader ( pDoclist );
this->SetHitReader ( pHitlist );
}
const CSphMatch & GetNextDoc() final
{
const CSphMatch & tMatch = BASE::GetNextDoc();
assert ( &tMatch==&this->m_tDoc );
if ( tMatch.m_tRowID==INVALID_ROWID && m_iDoclist<m_pPayload->m_dDoclist.GetLength() )
{
BASE::ResetDecoderState();
SetupReader();
BASE::GetNextDoc();
assert ( this->m_tDoc.m_tRowID!=INVALID_ROWID );
}
return this->m_tDoc;
}
bool Setup ( const DiskIndexQwordSetup_c * ) final
{
if ( m_iDoclist>=m_pPayload->m_dDoclist.GetLength() )
return false;
SetupReader();
return true;
}
private:
void SetupReader ()
{
uint64_t uDocOff = m_pPayload->m_dDoclist[m_iDoclist].m_uOff;
int iHint = m_pPayload->m_dDoclist[m_iDoclist].m_iLen;
m_iDoclist++;
this->m_rdDoclist->SeekTo ( uDocOff, iHint );
}
const DiskSubstringPayload_t * m_pPayload;
int m_iDoclist;
};
//////////////////////////////////////////////////////////////////////////
const char* CheckFmtMagic ( DWORD uHeader )
{
if ( uHeader!=INDEX_MAGIC_HEADER )
{
FlipEndianness ( &uHeader );
if ( uHeader==INDEX_MAGIC_HEADER )
#if USE_LITTLE_ENDIAN
return "This instance is working on little-endian platform, but %s seems built on big-endian host.";
#else
return "This instance is working on big-endian platform, but %s seems built on little-endian host.";
#endif
else
return "%s is invalid header file (too old table version?)";
}
return nullptr;
}
/// this pseudo-index is used to store and manage the tokenizer
/// without any footprint in real files
//////////////////////////////////////////////////////////////////////////
class CSphTokenizerIndex : public CSphIndexStub
{
public:
CSphTokenizerIndex ( CSphString sIndexName ) : CSphIndexStub ( std::move ( sIndexName ), "" ) {}
bool GetKeywords ( CSphVector <CSphKeywordInfo> & , const char * , const GetKeywordsSettings_t & tSettings, CSphString * ) const final ;
Bson_t ExplainQuery ( const CSphString & sQuery ) const final;
};
bool CSphTokenizerIndex::GetKeywords ( CSphVector <CSphKeywordInfo> & dKeywords, const char * szQuery, const GetKeywordsSettings_t & tSettings, CSphString * ) const
{
// short-cut if no query or keywords to fill
if ( !szQuery || !szQuery[0] )
return true;
TokenizerRefPtr_c pTokenizer = m_pTokenizer->Clone ( SPH_CLONE_INDEX );
pTokenizer->EnableTokenizedMultiformTracking ();
// need to support '*' and '=' but not the other specials
// so m_pQueryTokenizer does not work for us, gotta clone and setup one manually
DictRefPtr_c pDict = GetStatelessDict ( m_pDict );
if ( IsStarDict ( pDict->GetSettings().m_bWordDict ) )
{
pTokenizer->AddPlainChars ( "*" );
SetupStarDictOld ( pDict );
}
if ( m_tSettings.m_bIndexExactWords )
{
pTokenizer->AddSpecials ( "=" );
SetupExactDict ( pDict );
}
dKeywords.Resize ( 0 );
CSphVector<BYTE> dFiltered;
const BYTE * sModifiedQuery = (const BYTE *)szQuery;
FieldFilterOptions_t tFFOptions { tSettings.m_eJiebaMode };
if ( m_pFieldFilter && szQuery && m_pFieldFilter->Clone ( &tFFOptions )->Apply ( sModifiedQuery, dFiltered, true ) )
sModifiedQuery = dFiltered.Begin();
pTokenizer->SetBuffer ( sModifiedQuery, (int) strlen ( (const char*)sModifiedQuery) );
CSphTemplateQueryFilter tAotFilter;
tAotFilter.m_pTokenizer = std::move ( pTokenizer );
tAotFilter.m_pDict = std::move ( pDict );
tAotFilter.m_pSettings = &m_tSettings;
tAotFilter.m_tFoldSettings = tSettings;
tAotFilter.m_tFoldSettings.m_bStats = false;
tAotFilter.m_tFoldSettings.m_bFoldWildcards = true;
ExpansionContext_t tExpCtx;
tAotFilter.GetKeywords ( dKeywords, tExpCtx );
return true;
}
std::unique_ptr<CSphIndex> sphCreateIndexTemplate ( CSphString sIndexName )
{
return std::make_unique<CSphTokenizerIndex> ( std::move ( sIndexName ) );
}
Bson_t CSphTokenizerIndex::ExplainQuery ( const CSphString & sQuery ) const
{
bool bWordDict = m_pDict->GetSettings().m_bWordDict;
WordlistStub_c tWordlist;
ExplainQueryArgs_t tArgs;
tArgs.m_szQuery = sQuery.cstr();
tArgs.m_pDict = GetStatelessDict ( m_pDict );
if ( IsStarDict ( bWordDict ) )
SetupStarDictV8 ( tArgs.m_pDict, m_tSettings.m_iMinInfixLen>0 );
if ( m_tSettings.m_bIndexExactWords )
SetupExactDict ( tArgs.m_pDict );
if ( m_pFieldFilter )
tArgs.m_pFieldFilter = m_pFieldFilter->Clone();
tArgs.m_pSettings = &m_tSettings;
tArgs.m_pWordlist = &tWordlist;
tArgs.m_pQueryTokenizer = m_pQueryTokenizer;
tArgs.m_iExpandKeywords = m_tMutableSettings.m_iExpandKeywords;
tArgs.m_iExpansionLimit = m_iExpansionLimit;
tArgs.m_bExpandPrefix = ( bWordDict && IsStarDict ( bWordDict ) );
return Explain ( tArgs );
}
//////////////////////////////////////////////////////////////////////////
UpdateContext_t::UpdateContext_t ( AttrUpdateInc_t & tUpd, const ISphSchema & tSchema )
: m_tUpd ( tUpd )
, m_tSchema ( tSchema )
, m_iStride ( tSchema.GetRowSize() )
, m_dUpdatedAttrs ( tUpd.m_pUpdate->m_dAttributes.GetLength() )
, m_dSchemaUpdateMask ( tSchema.GetAttrsCount() )
{}
//////////////////////////////////////////////////////////////////////////
bool Update_CheckAttributes ( const CSphAttrUpdate & tUpd, const ISphSchema & tSchema, CSphString & sError )
{
for ( const auto & tUpdAttr : tUpd.m_dAttributes )
{
const CSphString & sUpdAttrName = tUpdAttr.m_sName;
int iUpdAttrId = tSchema.GetAttrIndex ( sUpdAttrName.cstr() );
// try to find JSON attribute with a field
if ( iUpdAttrId<0 )
{
CSphString sJsonCol;
if ( sphJsonNameSplit ( sUpdAttrName.cstr(), nullptr, &sJsonCol ) )
iUpdAttrId = tSchema.GetAttrIndex ( sJsonCol.cstr() );
}
if ( iUpdAttrId<0 )
{
if ( tUpd.m_bIgnoreNonexistent )
continue;
sError.SetSprintf ( "attribute '%s' not found", sUpdAttrName.cstr() );
return false;
}
// forbid updates on non-int columns
const CSphColumnInfo & tCol = tSchema.GetAttr ( iUpdAttrId );
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_BOOL:
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
case SPH_ATTR_FLOAT_VECTOR:
case SPH_ATTR_STRING:
case SPH_ATTR_BIGINT:
case SPH_ATTR_FLOAT:
case SPH_ATTR_JSON:
break;
default:
sError.SetSprintf ( "attribute '%s' can not be updated (must be boolean, integer, bigint, float, timestamp, string, MVA or JSON)", sUpdAttrName.cstr() );
return false;
}
bool bSrcMva = tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET || tCol.m_eAttrType==SPH_ATTR_FLOAT_VECTOR;
bool bDstMva = tUpdAttr.m_eType==SPH_ATTR_UINT32SET || tUpdAttr.m_eType==SPH_ATTR_INT64SET || tUpdAttr.m_eType==SPH_ATTR_FLOAT_VECTOR;
if ( bSrcMva!=bDstMva )
{
sError.SetSprintf ( "attribute '%s' MVA flag mismatch", sUpdAttrName.cstr() );
return false;
}
if( tCol.m_eAttrType==SPH_ATTR_UINT32SET && tUpdAttr.m_eType==SPH_ATTR_INT64SET )
{
sError.SetSprintf ( "attribute '%s' MVA bits (dst=%d, src=%d) mismatch", sUpdAttrName.cstr(), tCol.m_eAttrType, tUpdAttr.m_eType );
return false;
}
if( ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET ) && tUpdAttr.m_eType==SPH_ATTR_FLOAT_VECTOR )
{
sError.SetSprintf ( "can't update MVA attribute '%s' bits with float vector value", sUpdAttrName.cstr() );
return false;
}
if ( tCol.IsColumnar() )
{
sError.SetSprintf ( "unable to update columnar attribute '%s'", sUpdAttrName.cstr() );
return false;
}
if ( tCol.IsIndexedKNN() )
{
sError.SetSprintf ( "unable to update attribute '%s' that has a KNN index", sUpdAttrName.cstr() );
return false;
}
}
return true;
}
static void IncUpdatePoolPos ( const CSphAttrUpdate & tUpdate, int iAttr, int & iPos )
{
switch ( tUpdate.m_dAttributes[iAttr].m_eType )
{
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
case SPH_ATTR_FLOAT_VECTOR:
iPos += tUpdate.m_dPool[iPos] + 1;
break;
case SPH_ATTR_STRING:
case SPH_ATTR_BIGINT:
iPos += 2;
break;
default:
iPos += 1;
break;
}
}
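// pool layout implied by the switch above, per updated value:
//   MVA / float vector : [count] followed by count DWORDs
//   string / bigint    : 2 DWORDs (offset+length, or a 64-bit value split in two)
//   everything else    : 1 DWORD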
void UpdateContext_t::PrepareListOfUpdatedAttributes ( CSphString & sError )
{
int iPoolPos = 0;
const auto & tUpd = *m_tUpd.m_pUpdate;
ARRAY_FOREACH ( iAttr, tUpd.m_dAttributes )
{
const CSphString & sUpdAttrName = tUpd.m_dAttributes[iAttr].m_sName;
ESphAttr eUpdAttrType = tUpd.m_dAttributes[iAttr].m_eType;
UpdatedAttribute_t & tUpdAttr = m_dUpdatedAttrs[iAttr];
int iUpdAttrId = m_tSchema.GetAttrIndex ( sUpdAttrName.cstr() );
if ( iUpdAttrId<0 )
{
CSphString sJsonCol;
if ( sphJsonNameSplit ( sUpdAttrName.cstr(), nullptr, &sJsonCol ) )
{
iUpdAttrId = m_tSchema.GetAttrIndex ( sJsonCol.cstr() );
if ( iUpdAttrId>=0 )
{
ExprParseArgs_t tExprArgs;
tUpdAttr.m_pExpr = sphExprParse ( sUpdAttrName.cstr(), m_tSchema, nullptr, sError, tExprArgs );
}
}
}
if ( iUpdAttrId>=0 )
{
const CSphColumnInfo & tCol = m_tSchema.GetAttr(iUpdAttrId);
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_FLOAT:
if ( eUpdAttrType==SPH_ATTR_BIGINT )
tUpdAttr.m_eConversion = CONVERSION_BIGINT2FLOAT;
break;
case SPH_ATTR_BIGINT:
if ( eUpdAttrType==SPH_ATTR_FLOAT )
tUpdAttr.m_eConversion = CONVERSION_FLOAT2BIGINT;
break;
default:
break;
}
tUpdAttr.m_eAttrType = tCol.m_eAttrType;
tUpdAttr.m_tLocator = tCol.m_tLocator;
tUpdAttr.m_pHistogram = m_pHistograms ? m_pHistograms->Get(tCol.m_sName) : nullptr;
tUpdAttr.m_bExisting = true;
tUpdAttr.m_iSchemaAttr = iUpdAttrId;
m_dSchemaUpdateMask.BitSet(iUpdAttrId);
m_bBlobUpdate |= sphIsBlobAttr(tCol);
}
else
{
assert ( tUpd.m_bIgnoreNonexistent ); // should be handled by Update_CheckAttributes
IncUpdatePoolPos ( tUpd, iAttr, iPoolPos );
continue;
}
// this is a hack
// Query parser tries to detect an attribute type, and this is wrong: we should
// take the attribute type from the schema. Probably we'll rewrite updates in the future,
// but for now this fix just works.
// Fix cases like UPDATE float_attr=1 WHERE id=1;
assert ( iUpdAttrId>=0 );
if ( eUpdAttrType==SPH_ATTR_INTEGER && m_tSchema.GetAttr(iUpdAttrId).m_eAttrType==SPH_ATTR_FLOAT )
{
assert ( tUpd.m_dRowOffset.IsEmpty() ); // fixme! Now we don't fixup more than 1 value
const_cast<CSphAttrUpdate &>(tUpd).m_dAttributes[iAttr].m_eType = SPH_ATTR_FLOAT;
const_cast<CSphAttrUpdate &>(tUpd).m_dPool[iPoolPos] = sphF2DW ( (float)tUpd.m_dPool[iPoolPos] );
}
IncUpdatePoolPos ( tUpd, iAttr, iPoolPos );
}
}
static bool FitsInplaceJsonUpdate ( const UpdateContext_t & tCtx, int iAttr )
{
// only json fields and no strings (strings go as full json updates)
return tCtx.m_dUpdatedAttrs[iAttr].m_eAttrType==SPH_ATTR_JSON && tCtx.m_tUpd.m_pUpdate->m_dAttributes[iAttr].m_eType!=SPH_ATTR_STRING;
}
bool IndexSegment_c::Update_InplaceJson ( const RowsToUpdate_t& dRows, UpdateContext_t & tCtx, CSphString & sError, bool bDryRun )
{
const auto& tUpd = *tCtx.m_tUpd.m_pUpdate;
for ( const auto & tRow : dRows )
{
int iUpd = tRow.m_iIdx;
auto pDocinfo = tCtx.GetDocinfo ( tRow.m_tRow );
int iPos = tUpd.GetRowOffset ( iUpd );
ARRAY_CONSTFOREACH ( i, tUpd.m_dAttributes )
{
if ( !FitsInplaceJsonUpdate ( tCtx, i ) || !tCtx.m_dUpdatedAttrs[i].m_bExisting )
{
IncUpdatePoolPos ( tUpd, i, iPos );
continue;
}
ESphAttr eAttr = tUpd.m_dAttributes[i].m_eType;
bool bBigint = eAttr==SPH_ATTR_BIGINT;
bool bDouble = eAttr==SPH_ATTR_FLOAT;
ESphJsonType eType = bDouble ? JSON_DOUBLE : ( bBigint ? JSON_INT64 : JSON_INT32 );
SphAttr_t uValue = bDouble
? sphD2QW ( (double)sphDW2F ( tUpd.m_dPool[iPos] ) )
: ( bBigint ? MVA_UPSIZE ( &tUpd.m_dPool[iPos] ) : tUpd.m_dPool[iPos] );
if ( sphJsonInplaceUpdate ( eType, uValue, tCtx.m_dUpdatedAttrs[i].m_pExpr, tCtx.m_pBlobPool, pDocinfo, !bDryRun ) )
{
assert ( tCtx.m_dUpdatedAttrs[i].m_iSchemaAttr>=0 );
tCtx.m_tUpd.MarkUpdated ( iUpd );
tCtx.m_uUpdateMask |= ATTRS_BLOB_UPDATED;
// reset update bit to copy partial updated JSON into new blob
tCtx.m_dSchemaUpdateMask.BitClear ( tCtx.m_dUpdatedAttrs[i].m_iSchemaAttr );
} else
{
if ( bDryRun )
{
sError.SetSprintf ( "attribute '%s' can not be updated (not found or incompatible types)", tUpd.m_dAttributes[i].m_sName.cstr() );
return false;
} else
++tCtx.m_iJsonWarnings;
}
IncUpdatePoolPos ( tUpd, i, iPos );
}
}
return true;
}
bool IndexSegment_c::Update_Blobs ( const RowsToUpdate_t& dRows, UpdateContext_t & tCtx, bool & bCritical, CSphString & sError )
{
const auto & tUpd = *tCtx.m_tUpd.m_pUpdate;
// any blobs supplied in the update?
if ( !tCtx.m_bBlobUpdate )
return true;
// create a remap from attribute id in UPDATE to blob attr id
CSphVector<int> dRemap ( tUpd.m_dAttributes.GetLength() );
dRemap.Fill(-1);
CSphVector<int> dBlobAttrIds;
bool bNeedBlobBuilder = false;
int iBlobAttrId = 0;
for ( int i = 0, iAttrs = tCtx.m_tSchema.GetAttrsCount (); i < iAttrs; ++i )
{
const CSphColumnInfo & tAttr = tCtx.m_tSchema.GetAttr(i);
if ( sphIsBlobAttr(tAttr) )
{
dBlobAttrIds.Add(i);
ARRAY_CONSTFOREACH ( iUpd, tUpd.m_dAttributes )
{
const TypedAttribute_t & tTypedAttr = tUpd.m_dAttributes[iUpd];
if ( sphIsBlobAttr ( tTypedAttr.m_eType ) && tAttr.m_sName==tTypedAttr.m_sName )
{
dRemap[iUpd] = iBlobAttrId;
bNeedBlobBuilder = true;
}
}
++iBlobAttrId;
}
}
if ( !bNeedBlobBuilder )
return true;
CSphTightVector<BYTE> tBlobPool;
std::unique_ptr<BlobRowBuilder_i> pBlobRowBuilder = sphCreateBlobRowBuilderUpdate ( tCtx.m_tSchema, tUpd.m_dAttributes, tBlobPool, tCtx.m_dSchemaUpdateMask );
const CSphColumnInfo * pBlobLocator = tCtx.m_tSchema.GetAttr ( sphGetBlobLocatorName() );
for ( const auto & tRow : dRows )
{
int iUpd = tRow.m_iIdx;
auto pDocinfo = tCtx.GetDocinfo ( tRow.m_tRow );
tBlobPool.Resize(0);
ARRAY_CONSTFOREACH ( iBlobId, dBlobAttrIds )
{
int iCol = dBlobAttrIds[iBlobId];
if ( tCtx.m_dSchemaUpdateMask.BitGet(iCol) )
continue;
const CSphColumnInfo & tAttr = tCtx.m_tSchema.GetAttr(iCol);
int iLengthBytes = 0;
const BYTE* pData = sphGetBlobAttr ( pDocinfo, tAttr.m_tLocator, tCtx.m_pBlobPool, iLengthBytes );
if ( !pBlobRowBuilder->SetAttr ( iBlobId, pData, iLengthBytes, sError ) )
return false;
}
int iPos = tUpd.GetRowOffset ( iUpd );
ARRAY_CONSTFOREACH ( iCol, tUpd.m_dAttributes )
{
ESphAttr eAttr = tUpd.m_dAttributes[iCol].m_eType;
if ( !sphIsBlobAttr(eAttr) || FitsInplaceJsonUpdate ( tCtx, iCol ) || !tCtx.m_dUpdatedAttrs[iCol].m_bExisting )
{
IncUpdatePoolPos ( tUpd, iCol, iPos );
continue;
}
int iBlobId = dRemap[iCol];
switch ( eAttr )
{
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
case SPH_ATTR_FLOAT_VECTOR:
{
DWORD uLength = tUpd.m_dPool[iPos++];
if ( iBlobId!=-1 )
{
pBlobRowBuilder->SetAttr ( iBlobId, (const BYTE *)(tUpd.m_dPool.Begin()+iPos), uLength*sizeof(DWORD), sError );
tCtx.m_tUpd.MarkUpdated ( iUpd );
tCtx.m_uUpdateMask |= ATTRS_BLOB_UPDATED;
}
iPos += uLength;
}
break;
case SPH_ATTR_STRING:
{
DWORD uOffset = tUpd.m_dPool[iPos++];
DWORD uLength = tUpd.m_dPool[iPos++];
if ( iBlobId!=-1 )
{
pBlobRowBuilder->SetAttr ( iBlobId, &tUpd.m_dBlobs[uOffset], uLength, sError );
tCtx.m_tUpd.MarkUpdated ( iUpd );
tCtx.m_uUpdateMask |= ATTRS_BLOB_UPDATED;
}
}
break;
default:
IncUpdatePoolPos ( tUpd, iCol, iPos );
break;
}
}
pBlobRowBuilder->Flush();
assert(pBlobLocator);
if ( !Update_WriteBlobRow ( tCtx, tRow.m_tRow, tBlobPool, iBlobAttrId, pBlobLocator->m_tLocator, bCritical, sError ) )
return false;
}
return true;
}
bool UpdateContext_t::HandleJsonWarnings ( int iUpdated, CSphString & sWarning, CSphString & sError ) const
{
if ( !m_iJsonWarnings )
return true;
sWarning.SetSprintf ( "%d attribute(s) can not be updated (not found or incompatible types)", m_iJsonWarnings );
if ( !iUpdated )
sError = sWarning;
return !!iUpdated;
}
CSphRowitem* UpdateContext_t::GetDocinfo ( RowID_t iRowID ) const
{
assert ( iRowID != INVALID_ROWID );
assert ( m_pAttrPool );
assert ( m_iStride );
return m_pAttrPool + iRowID * m_iStride;
}
void IndexSegment_c::Update_Plain ( const RowsToUpdate_t& dRows, UpdateContext_t & tCtx )
{
const auto & tUpd = *tCtx.m_tUpd.m_pUpdate;
for ( const auto & tRow : dRows )
{
int iPos = tUpd.GetRowOffset ( tRow.m_iIdx );
auto pDocinfo = tCtx.GetDocinfo ( tRow.m_tRow );
ARRAY_CONSTFOREACH ( iCol, tUpd.m_dAttributes )
{
ESphAttr eAttr = tUpd.m_dAttributes[iCol].m_eType;
const UpdatedAttribute_t & tUpdAttr = tCtx.m_dUpdatedAttrs[iCol];
// already updated?
if ( sphIsBlobAttr(eAttr) || tUpdAttr.m_eAttrType==SPH_ATTR_JSON || !tUpdAttr.m_bExisting )
{
IncUpdatePoolPos ( tUpd, iCol, iPos );
continue;
}
const CSphAttrLocator & tLoc = tUpdAttr.m_tLocator;
bool bBigint = eAttr==SPH_ATTR_BIGINT;
SphAttr_t uValue = bBigint ? MVA_UPSIZE ( &tUpd.m_dPool[iPos] ) : tUpd.m_dPool[iPos];
if ( tUpdAttr.m_eConversion==CONVERSION_BIGINT2FLOAT ) // handle bigint(-1) -> float attr updates
uValue = sphF2DW ( float((int64_t)uValue) );
else if ( tUpdAttr.m_eConversion==CONVERSION_FLOAT2BIGINT ) // handle float(1.0) -> bigint attr updates
uValue = (int64_t)sphDW2F((DWORD)uValue);
Histogram_i * pHistogram = tUpdAttr.m_pHistogram;
if ( pHistogram )
{
SphAttr_t tOldValue = sphGetRowAttr ( pDocinfo, tLoc );
pHistogram->Delete ( tOldValue );
pHistogram->UpdateCounter ( uValue );
}
sphSetRowAttr ( pDocinfo, tLoc, uValue );
tCtx.m_tUpd.MarkUpdated ( tRow.m_iIdx );
tCtx.m_uUpdateMask |= ATTRS_UPDATED;
// next
IncUpdatePoolPos ( tUpd, iCol, iPos );
}
}
}
bool IndexSegment_c::Update_UpdateAttributes ( const RowsToUpdate_t& dRows, UpdateContext_t& tCtx, bool& bCritical, CSphString& sError )
{
// FIXME! FIXME! FIXME! overwriting just-freed blocks might hurt concurrent searchers;
// should implement a simplistic MVCC-style delayed-free to avoid that
TRACE_CORO ( "rt", "IndexSegment_c::Update_UpdateAttributes" );
// first pass, if needed
if ( tCtx.m_tUpd.m_pUpdate->m_bStrict )
if ( !Update_InplaceJson ( dRows, tCtx, sError, true ) )
return false;
// second pass
int iSaveWarnings = tCtx.m_iJsonWarnings;
tCtx.m_iJsonWarnings = 0;
Update_InplaceJson ( dRows, tCtx, sError, false );
tCtx.m_iJsonWarnings += iSaveWarnings;
if ( !Update_Blobs ( dRows, tCtx, bCritical, sError ) )
return false;
Update_Plain ( dRows, tCtx );
return true;
}
class QueryMvaContainer_c
{
public:
CSphVector<OpenHashTable_T<int64_t,CSphVector<int64_t>>*> m_tContainer;
~QueryMvaContainer_c()
{
for ( auto i : m_tContainer )
delete i;
}
};
class CSphHitBuilder;
/// this is my actual VLN-compressed phrase index implementation
class CSphIndex_VLN : public CSphIndex, public IndexAlterHelper_c, public DebugCheckHelper_c
{
friend class DiskIndexQwordSetup_c;
friend class CSphMerger;
friend class AttrIndexBuilder_c;
friend struct SphFinalMatchCalc_t;
friend class KeepAttrs_c;
public:
CSphIndex_VLN ( CSphString sIndexName, CSphString sFilename );
~CSphIndex_VLN() override;
int Build ( const CSphVector<CSphSource*> & dSources, int iMemoryLimit, int iWriteBuffer, CSphIndexProgress & tProgress ) final; // fixme! build only
enum class LOAD_E { ParseError_e, GeneralError_e, Ok_e };
LOAD_E LoadHeaderLegacy ( const CSphString& sHeaderName, bool bStripPath, CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder, CSphString & sWarning );
LOAD_E LoadHeaderJson ( const CSphString& sHeaderName, bool bStripPath, CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder, CSphString & sWarning );
void DebugDumpHeader ( FILE * fp, const CSphString& sHeaderName, bool bConfig ) final;
void DebugDumpDocids ( FILE * fp ) final;
void DebugDumpHitlist ( FILE * fp, const char * sKeyword, bool bID ) final;
void DebugDumpDict ( FILE * fp, bool bDumpOnly ) final;
void SetDebugCheck ( bool bCheckIdDups, int iCheckChunk ) final;
int DebugCheck ( DebugCheckError_i & , FilenameBuilder_i * pFilenameBuilder ) final;
template <class Qword> void DumpHitlist ( FILE * fp, const char * sKeyword, bool bID );
bool Prealloc ( bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings ) final;
void Dealloc () final;
void Preread () final;
RenameResult_e RenameEx ( CSphString sNewBase ) final;
bool Lock () final;
void Unlock () final;
bool MultiQuery ( CSphQueryResult& tResult, const CSphQuery& tQuery, const VecTraits_T<ISphMatchSorter*>& dAllSorters, const CSphMultiQueryArgs& tArgs ) const final;
bool MultiQueryEx ( int iQueries, const CSphQuery * pQueries, CSphQueryResult* pResults, ISphMatchSorter ** ppSorters, const CSphMultiQueryArgs & tArgs ) const final;
bool GetKeywords ( CSphVector <CSphKeywordInfo> & dKeywords, const char * szQuery, const GetKeywordsSettings_t & tSettings, CSphString * pError ) const final;
template <class Qword> bool DoGetKeywords ( CSphVector <CSphKeywordInfo> & dKeywords, const char * szQuery, const GetKeywordsSettings_t & tSettings, bool bFillOnly, CSphString * pError ) const;
bool FillKeywords ( CSphVector <CSphKeywordInfo> & dKeywords ) const final;
void GetSuggest ( const SuggestArgs_t & tArgs, SuggestResult_t & tRes ) const final;
bool Merge ( CSphIndex * pSource, const VecTraits_T<CSphFilterSettings> & dFilters, bool bSupressDstDocids, CSphIndexProgress & tProgress ) final; // fixme! build only
template <class QWORDDST, class QWORDSRC>
static bool MergeWords ( const CSphIndex_VLN * pDstIndex, const CSphIndex_VLN * pSrcIndex, VecTraits_T<RowID_t> dDstRows, VecTraits_T<RowID_t> dSrcRows, CSphHitBuilder * pHitBuilder, CSphString & sError, CSphIndexProgress & tProgress);
static bool DoMerge ( const CSphIndex_VLN * pDstIndex, const CSphIndex_VLN * pSrcIndex, const ISphFilter * pFilter, CSphString & sError, CSphIndexProgress & tProgress, bool bSrcSettings, bool bSupressDstDocids );
std::unique_ptr<ISphFilter> CreateMergeFilters ( const VecTraits_T<CSphFilterSettings> & dSettings ) const;
template <class QWORD>
static bool DeleteField ( const CSphIndex_VLN * pIndex, CSphHitBuilder * pHitBuilder, CSphString & sError, CSphSourceStats & tStat, int iKillField );
int CheckThenUpdateAttributes ( AttrUpdateInc_t& tUpd, bool& bCritical, CSphString& sError, CSphString& sWarning ) final;
void UpdateAttributesOffline ( VecTraits_T<PostponedUpdate_t> & dPostUpdates ) final;
// the only txn we can replay is 'update attributes', but it is processed by a dedicated branch in the binlog, so we have nothing to do here.
Binlog::CheckTnxResult_t ReplayTxn ( CSphReader&, CSphString&, BYTE, Binlog::CheckTxn_fn&& ) final;
bool SaveAttributes ( CSphString & sError ) const final;
DWORD GetAttributeStatus () const final;
bool AddRemoveAttribute ( bool bAddAttr, const AttrAddRemoveCtx_t & tCtx, CSphString & sError ) final;
bool AddRemoveField ( bool bAdd, const CSphString & sFieldName, DWORD uFieldFlags, CSphString & sError ) final;
void FlushDeadRowMap ( bool bWaitComplete ) const final;
bool LoadKillList ( CSphFixedVector<DocID_t> * pKillList, KillListTargets_c & tTargets, CSphString & sError ) const final;
bool AlterKillListTarget ( KillListTargets_c & tTargets, CSphString & sError ) final;
void KillExistingDocids ( CSphIndex * pTarget ) const final;
bool EarlyReject ( CSphQueryContext * pCtx, CSphMatch & tMatch ) const final;
void SetKeepAttrs ( const CSphString & sKeepAttrs, const StrVec_t & dAttrs ) final { m_sKeepAttrs = sKeepAttrs; m_dKeepAttrs = dAttrs; }
RowID_t GetRowidByDocid ( DocID_t tDocID ) const;
int Kill ( DocID_t tDocID ) final;
int KillMulti ( const VecTraits_T<DocID_t> & dKlist ) final;
int KillDupes() final;
int CheckThenKillMulti ( const VecTraits_T<DocID_t>& dKlist, BlockerFn&& fnWatcher ) final;
bool IsAlive ( DocID_t tDocID ) const final;
const CSphSourceStats & GetStats () const final { return m_tStats; }
int64_t * GetFieldLens() const final { return m_tSettings.m_bIndexFieldLens ? m_dFieldLens.begin() : nullptr; }
void GetStatus ( CSphIndexStatus* ) const final;
bool PreallocWordlist();
bool PreallocAttributes();
bool PreallocDocidLookup();
bool PreallocKilllist();
bool PreallocHistograms ( StrVec_t & dWarnings );
bool PreallocDocstore();
bool PreallocColumnar();
bool PreallocKNN();
bool PreallocSkiplist();
bool LoadSecondaryIndex ( const CSphString & sFile );
bool PreallocSecondaryIndex();
void PrepareHeaders ( BuildHeader_t & tBuildHeader, WriteHeader_t & tWriteHeader, bool bCopyDictHeader = true );
bool SaveHeader ( CSphString & sError );
CSphVector<SphAttr_t> BuildDocList () const final;
// docstore-related section
void CreateReader ( int64_t iSessionId ) const final;
bool GetDoc ( DocstoreDoc_t & tDoc, DocID_t tDocID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const final;
int GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const final;
Bson_t ExplainQuery ( const CSphString & sQuery ) const final;
HistogramContainer_c * Debug_GetHistograms() const override { return m_pHistograms; }
const SIContainer_c * Debug_GetSI() const override { return &m_tSI; }
bool CheckEarlyReject ( const CSphVector<CSphFilterSettings> & dFilters, const ISphFilter * pFilter, ESphCollation eCollation, const ISphSchema & tSchema ) const;
std::pair<int64_t,int> GetPseudoShardingMetric ( const VecTraits_T<const CSphQuery> & dQueries, const VecTraits_T<int64_t> & dMaxCountDistinct, int iThreads, bool & bForceSingleThread ) const override;
int64_t GetCountDistinct ( const CSphString & sAttr, CSphString & sModifiedAttr ) const override;
int64_t GetCountFilter ( const CSphFilterSettings & tFilter, CSphString & sModifiedAttr ) const override;
int64_t GetCount() const override;
private:
static const int MIN_WRITE_BUFFER = 262144; ///< min write buffer size
static const int DEFAULT_WRITE_BUFFER = 1048576; ///< default write buffer size
private:
// common stuff
int m_iLockFD;
CSphSourceStats m_tStats; ///< my stats
CSphFixedVector<int64_t> m_dFieldLens; ///< total per-field lengths summed over entire indexed data, in tokens
CSphString m_sKeepAttrs; ///< retain attributes of that index when reindexing //fixme! build only
StrVec_t m_dKeepAttrs; // fixme! build only
private:
int64_t m_iDocinfo; ///< my docinfo cache size
int64_t m_iDocinfoIndex; ///< docinfo "index" entries count (each entry is 2x docinfo rows, for min/max)
DWORD * m_pDocinfoIndex; ///< docinfo "index", to accelerate filtering during full-scan (2x rows for each block, and 2x rows for the whole index, 1+m_uDocinfoIndex entries)
int64_t m_iMinMaxIndex; ///< stored min/max cache offset (counted in DWORDs)
// !COMMIT slow setup data
CSphMappedBuffer<DWORD> m_tAttr;
CSphMappedBuffer<BYTE> m_tBlobAttrs;
CSphMappedBuffer<BYTE> m_tSkiplists; ///< (compressed) skiplists data
CWordlist m_tWordlist; ///< my wordlist
DeadRowMap_Disk_c m_tDeadRowMap;
CSphMappedBuffer<BYTE> m_tDocidLookup; ///< speeds up docid-rowid lookups + used for applying killlist on startup
LookupReader_c m_tLookupReader; ///< used by getrowidbydocid
std::unique_ptr<Docstore_i> m_pDocstore;
std::unique_ptr<columnar::Columnar_i> m_pColumnar;
std::unique_ptr<knn::KNN_i> m_pKNN;
SIContainer_c m_tSI;
DWORD m_uVersion; ///< data files version
volatile bool m_bPassedRead;
volatile bool m_bPassedAlloc;
bool m_bIsEmpty; ///< whether we actually have indexed documents (m_iTotalDocuments counts fetched documents, not indexed ones!)
bool m_bDebugCheck;
bool m_bCheckIdDups = false;
mutable DWORD m_uAttrsStatus = 0;
DataReaderFactoryPtr_c m_pDoclistFile; ///< doclist file
DataReaderFactoryPtr_c m_pHitlistFile; ///< hitlist file
DataReaderFactoryPtr_c m_pColumnarFile; ///< columnar file
HistogramContainer_c * m_pHistograms {nullptr};
private:
void GetIndexFiles ( StrVec_t& dFiles, StrVec_t& dExt, const FilenameBuilder_i* = nullptr ) const override;
bool ParsedMultiQuery ( const CSphQuery & tQuery, CSphQueryResult & tResult, const VecTraits_T<ISphMatchSorter*> & dSorters, const XQQuery_t & tXQ, DictRefPtr_c pDict, const CSphMultiQueryArgs & tArgs, CSphQueryNodeCache * pNodeCache, int64_t tmMaxTimer ) const;
bool RunParsedMultiQuery ( int iStackNeed, DictRefPtr_c & pDict, bool bCloneDict, const CSphQuery & tQuery, CSphQueryResult & tResult, VecTraits_T<ISphMatchSorter*> & dSorters, const XQQuery_t & tParsed, const CSphMultiQueryArgs & tArgs, int64_t tmMaxTimer ) const;
template <bool ROWID_LIMITS>
bool ScanByBlocks ( const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer, const RowIdBoundaries_t * pBoundaries = nullptr ) const;
bool RunFullscanOnAttrs ( const RowIdBoundaries_t & tBoundaries, const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer ) const;
bool RunFullscanOnIterator ( RowidIterator_i * pIterator, const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer ) const;
bool MultiScan ( CSphQueryResult& tResult, const CSphQuery& tQuery, const VecTraits_T<ISphMatchSorter*>& dSorters, const CSphMultiQueryArgs& tArgs, int64_t tmMaxTimer ) const;
template<bool USE_KLIST, bool RANDOMIZE, bool USE_FACTORS, bool HAS_SORT_CALC, bool HAS_WEIGHT_FILTER, bool HAS_FILTER_CALC, bool HAS_CUTOFF>
void MatchExtended ( CSphQueryContext & tCtx, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *>& dSorters, ISphRanker * pRanker, int iTag, int iIndexWeight, int iCutoff ) const;
const CSphRowitem * FindDocinfo ( DocID_t tDocID ) const;
const DWORD * GetDocinfoByRowID ( RowID_t tRowID ) const;
RowID_t GetRowIDByDocinfo ( const CSphRowitem * pDocinfo ) const;
void SetupStarDict ( DictRefPtr_c &pDict ) const;
void SetupExactDict ( DictRefPtr_c &pDict ) const;
bool RelocateBlock ( int iFile, BYTE * pBuffer, int iRelocationSize, SphOffset_t * pFileSize, CSphBin & dMinBin, SphOffset_t * pSharedOffset ); // build only
bool SortDocidLookup ( int iFD, int nBlocks, int iMemoryLimit, int nLookupsInBlock, int nLookupsInLastBlock, CSphIndexProgress& tProgress ); // build only
private:
bool JuggleFile ( ESphExt eExt, CSphString & sError, bool bNeedSrc=true, bool bNeedDst=true ) const;
XQNode_t * ExpandPrefix ( XQNode_t * pNode, CSphQueryResultMeta & tMeta, CSphScopedPayload * pPayloads, DWORD uQueryDebugFlags, int iQueryExpansionLimit ) const;
static std::pair<DWORD,DWORD> CreateRowMapsAndCountTotalDocs ( const CSphIndex_VLN* pSrcIndex, const CSphIndex_VLN* pDstIndex, CSphFixedVector<RowID_t>& dSrcRowMap, CSphFixedVector<RowID_t>& dDstRowMap, const ISphFilter* pFilter, bool bSupressDstDocids, MergeCb_c& tMonitor );
RowsToUpdateData_t Update_CollectRowPtrs ( const UpdateContext_t & tCtx );
RowsToUpdate_t Update_PrepareGatheredRowPtrs ( RowsToUpdate_t & dWRows, const VecTraits_T<DocID_t> & dDocids );
bool Update_WriteBlobRow ( UpdateContext_t & tCtx, RowID_t tRowID, ByteBlob_t tBlob, int nBlobAttrs, const CSphAttrLocator & tBlobRowLoc, bool & bCritical, CSphString & sError ) final;
void Update_MinMax ( const RowsToUpdate_t& dRows, const UpdateContext_t & tCtx );
void MaybeAddPostponedUpdate ( RowsToUpdateData_t& dRows, const UpdateContext_t& tCtx );
bool DoUpdateAttributes ( const RowsToUpdate_t& dRows, UpdateContext_t& tCtx, bool & bCritical, CSphString & sError );
bool Alter_IsMinMax ( const CSphRowitem * pDocinfo, int iStride ) const override;
bool AddRemoveColumnarAttr ( bool bAddAttr, const CSphString & sAttrName, ESphAttr eAttrType, const ISphSchema & tOldSchema, const ISphSchema & tNewSchema, CSphString & sError );
bool DeleteFieldFromDict ( int iFieldId, BuildHeader_t & tBuildHeader, CSphString & sError );
bool AddRemoveFromDocstore ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, CSphString & sError );
bool Build_SetupInplace ( SphOffset_t & iHitsGap, int iHitsMax, int iFdHits ) const; // fixme! build only
bool Build_SetupDocstore ( std::unique_ptr<DocstoreBuilder_i> & pDocstore, CSphBitvec & dStoredFields, CSphBitvec & dStoredAttrs, CSphVector<CSphVector<BYTE>> & dTmpDocstoreFieldStorage, CSphVector<CSphVector<BYTE>> & dTmpDocstoreAttrStorage ); // fixme! build only
bool Build_SetupBlobBuilder ( std::unique_ptr<BlobRowBuilder_i> & pBuilder ); // fixme! build only
bool Build_SetupColumnar ( std::unique_ptr<columnar::Builder_i> & pBuilder, CSphBitvec & tColumnarAttrs ); // fixme! build only
bool Build_SetupSI ( std::unique_ptr<SI::Builder_i> & pSIBuilder, std::unique_ptr<JsonSIBuilder_i> & pJsonSIBuilder, CSphBitvec & tSIAttrs, int64_t iMemoryLimit );
void Build_AddToDocstore ( DocstoreBuilder_i * pDocstoreBuilder, DocID_t tDocID, QueryMvaContainer_c & tMvaContainer, CSphSource & tSource, const CSphBitvec & dStoredFields, const CSphBitvec & dStoredAttrs, CSphVector<CSphVector<BYTE>> & dTmpDocstoreFieldStorage, CSphVector<CSphVector<BYTE>> & dTmpDocstoreAttrStorage, const CSphVector<std::unique_ptr<OpenHashTable_T<uint64_t, uint64_t>>> & dJoinedOffsets, CSphReader & tJoinedReader ); // fixme! build only
bool Build_StoreBlobAttrs ( DocID_t tDocId, std::pair<SphOffset_t,SphOffset_t> & tOffsetSize, BlobRowBuilder_i & tBlobRowBuilder, QueryMvaContainer_c & tMvaContainer, AttrSource_i & tSource, bool bForceSource ); // fixme! build only
bool Build_CollectQueryMvas ( const CSphVector<CSphSource*> & dSources, QueryMvaContainer_c & tMvaContainer ); // build only
bool Build_CollectJoinedFields ( const CSphVector<CSphSource*> & dSources, CSphAutofile & tFile, CSphVector<std::unique_ptr<OpenHashTable_T<uint64_t, uint64_t>>> & dJoinedOffsets );
bool SpawnReader ( DataReaderFactoryPtr_c & pFile, ESphExt eExt, DataReaderFactory_c::Kind_e eKind, int iBuffer, FileAccess_e eAccess );
bool SpawnReaders();
RowIteratorsWithEstimates_t CreateColumnarAnalyzerOrPrefilter ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, const CSphVector<FilterTreeItem_t> & dFilterTree, const ISphFilter * pFilter, ESphCollation eCollation, const ISphSchema & tSchema, CSphString & sWarning ) const;
template<typename RUN>
bool SplitQuery ( RUN && tRun, CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dAllSorters, const CSphMultiQueryArgs & tArgs, int64_t tmMaxTimer ) const;
bool ChooseIterators ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, const ISphSchema & tMaxSorterSchema, CSphQueryResultMeta & tMeta, int iCutoff, int iThreads, CSphVector<CSphFilterSettings> & dModifiedFilters, ISphRanker * pRanker ) const;
std::pair<RowidIterator_i *, bool> SpawnIterators ( const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, const ISphSchema & tMaxSorterSchema, CSphQueryResultMeta & tMeta, int iCutoff, int iThreads, CSphVector<CSphFilterSettings> & dModifiedFilters, ISphRanker * pRanker ) const;
bool SelectIteratorsFT ( const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, const ISphSchema & tSorterSchema, ISphRanker * pRanker, CSphVector<SecondaryIndexInfo_t> & dSIInfo, int iCutoff, int iThreads, StrVec_t & dWarnings ) const;
bool IsQueryFast ( const CSphQuery & tQuery, const CSphVector<SecondaryIndexInfo_t> & dEnabledIndexes, float fCost ) const;
CSphVector<SecondaryIndexInfo_t> GetEnabledIndexes ( const CSphQuery & tQuery, bool bFT, float & fCost, int iThreads ) const;
bool SetupFiltersAndContext ( CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, CSphQueryResultMeta & tMeta, const ISphSchema * & pMaxSorterSchema, CSphVector<CSphFilterSettings> & dTransformedFilters, CSphVector<FilterTreeItem_t> & dTransformedFilterTree, std::unique_ptr<ISphSchema> & pModifiedMatchSchema, const VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs ) const;
Docstore_i * GetDocstore() const override { return m_pDocstore.get(); }
columnar::Columnar_i * GetColumnar() const override { return m_pColumnar.get(); }
const DWORD * GetRawAttrs() const override { return m_tAttr.GetReadPtr(); }
const BYTE * GetRawBlobAttrs() const override { return m_tBlobAttrs.GetReadPtr(); }
bool AlterSI ( CSphString & sError ) override;
};
/////////////////////////////////////////////////////////////////////////////
// UTILITY FUNCTIONS
/////////////////////////////////////////////////////////////////////////////
void sphSleepMsec ( int iMsec )
{
if ( iMsec<0 )
return;
#if _WIN32
Sleep ( iMsec );
#else
struct timeval tvTimeout;
tvTimeout.tv_sec = iMsec / 1000; // full seconds
tvTimeout.tv_usec = ( iMsec % 1000 ) * 1000; // remainder is msec, so *1000 for usec
select ( 0, NULL, NULL, NULL, &tvTimeout ); // FIXME? could handle EINTR
#endif
}
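// set the global "unhinted" read buffer size; non-positive input falls back to
// DEFAULT_READ_UNHINTED, and the result is clamped from below by MIN_READ_UNHINTED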
void SetUnhintedBuffer ( int iReadUnhinted )
{
if ( iReadUnhinted<=0 )
iReadUnhinted = DEFAULT_READ_UNHINTED;
g_iReadUnhinted = Max ( iReadUnhinted, MIN_READ_UNHINTED );
}
int GetUnhintedBuffer()
{
return g_iReadUnhinted;
}
// returns correct size even if iBuf is 0
int GetReadBuffer ( int iBuf )
{
if ( !iBuf )
return DEFAULT_READ_BUFFER;
return Max ( iBuf, MIN_READ_BUFFER );
}
bool IsMlock ( FileAccess_e eType ) { return eType==FileAccess_e::MLOCK; }
bool IsOndisk ( FileAccess_e eType ) { return eType==FileAccess_e::FILE || eType==FileAccess_e::MMAP; }
bool FileAccessSettings_t::operator== ( const FileAccessSettings_t & tOther ) const
{
return ( m_eAttr==tOther.m_eAttr && m_eBlob==tOther.m_eBlob && m_eDoclist==tOther.m_eDoclist && m_eHitlist==tOther.m_eHitlist && m_eDict==tOther.m_eDict &&
m_iReadBufferDocList==tOther.m_iReadBufferDocList && m_iReadBufferHitList==tOther.m_iReadBufferHitList );
}
bool FileAccessSettings_t::operator!= ( const FileAccessSettings_t & tOther ) const
{
return !operator==( tOther );
}
//////////////////////////////////////////////////////////////////////////
RowTagged_t::RowTagged_t ( const CSphMatch & tMatch )
{
m_tID = tMatch.m_tRowID;
m_iTag = tMatch.m_iTag;
}
RowTagged_t::RowTagged_t ( RowID_t tRowID, int iTag )
{
m_tID = tRowID;
m_iTag = iTag;
}
bool RowTagged_t::operator== ( const RowTagged_t & tRow ) const
{
return ( m_tID==tRow.m_tID && m_iTag==tRow.m_iTag );
}
bool RowTagged_t::operator!= ( const RowTagged_t & tRow ) const
{
return !( *this==tRow );
}
/////////////////////////////////////////////////////////////////////////////
void CSphEmbeddedFiles::Reset()
{
m_dSynonyms.Reset();
m_dStopwordFiles.Reset();
m_dStopwords.Reset();
m_dWordforms.Reset();
m_dWordformFiles.Reset();
}
/////////////////////////////////////////////////////////////////////////////
// FILTER
/////////////////////////////////////////////////////////////////////////////
void CSphFilterSettings::SetExternalValues ( const VecTraits_T<SphAttr_t>& dValues )
{
m_dExtValues = dValues;
}
bool CSphFilterSettings::operator == ( const CSphFilterSettings & rhs ) const
{
// check name, mode, type
if ( m_sAttrName!=rhs.m_sAttrName || m_bExclude!=rhs.m_bExclude || m_eType!=rhs.m_eType )
return false;
switch ( m_eType )
{
case SPH_FILTER_RANGE:
return m_iMinValue==rhs.m_iMinValue && m_iMaxValue==rhs.m_iMaxValue;
case SPH_FILTER_FLOATRANGE:
return m_fMinValue==rhs.m_fMinValue && m_fMaxValue==rhs.m_fMaxValue;
case SPH_FILTER_VALUES:
if ( m_dValues.GetLength()!=rhs.m_dValues.GetLength() )
return false;
ARRAY_FOREACH ( i, m_dValues )
if ( m_dValues[i]!=rhs.m_dValues[i] )
return false;
return true;
case SPH_FILTER_STRING:
case SPH_FILTER_USERVAR:
case SPH_FILTER_STRING_LIST:
if ( m_dStrings.GetLength ()!=rhs.m_dStrings.GetLength () )
return false;
ARRAY_FOREACH ( i, m_dStrings )
if ( m_dStrings[i]!=rhs.m_dStrings[i] )
return false;
return ( m_eMvaFunc==rhs.m_eMvaFunc );
default:
assert ( 0 && "internal error: unhandled filter type in comparison" );
return false;
}
}
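// FNV64 hash over the filter type, exclude flag, and type-specific payload
// (note: the attribute name is not included; it is compared separately)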
uint64_t CSphFilterSettings::GetHash() const
{
uint64_t h = sphFNV64 ( &m_eType, sizeof(m_eType) );
h = sphFNV64 ( &m_bExclude, sizeof(m_bExclude), h );
switch ( m_eType )
{
case SPH_FILTER_VALUES:
{
int t = m_dValues.GetLength();
h = sphFNV64 ( &t, sizeof(t), h );
h = sphFNV64 ( m_dValues.Begin(), t*sizeof(SphAttr_t), h );
break;
}
case SPH_FILTER_RANGE:
h = sphFNV64 ( &m_iMaxValue, sizeof(m_iMaxValue), sphFNV64 ( &m_iMinValue, sizeof(m_iMinValue), h ) );
break;
case SPH_FILTER_FLOATRANGE:
h = sphFNV64 ( &m_fMaxValue, sizeof(m_fMaxValue), sphFNV64 ( &m_fMinValue, sizeof(m_fMinValue), h ) );
break;
case SPH_FILTER_STRING:
case SPH_FILTER_USERVAR:
case SPH_FILTER_STRING_LIST:
ARRAY_FOREACH ( iString, m_dStrings )
h = sphFNV64cont ( m_dStrings[iString].cstr(), h );
if ( m_eMvaFunc!=SPH_MVAFUNC_NONE )
h = sphFNV64 ( &m_eMvaFunc, sizeof ( m_eMvaFunc ), h );
break;
case SPH_FILTER_NULL:
break;
default:
assert ( 0 && "internal error: unhandled filter type in GetHash()" );
}
return h;
}
bool FilterTreeItem_t::operator == ( const FilterTreeItem_t & rhs ) const
{
return ( m_iLeft==rhs.m_iLeft && m_iRight==rhs.m_iRight && m_iFilterItem==rhs.m_iFilterItem && m_bOr==rhs.m_bOr );
}
uint64_t FilterTreeItem_t::GetHash() const
{
uint64_t uHash = sphFNV64 ( &m_iLeft, sizeof(m_iLeft) );
uHash = sphFNV64 ( &m_iRight, sizeof(m_iRight), uHash );
uHash = sphFNV64 ( &m_iFilterItem, sizeof(m_iFilterItem), uHash );
uHash = sphFNV64 ( &m_bOr, sizeof(m_bOr), uHash );
return uHash;
}
/////////////////////////////////////////////////////////////////////////////
// QUERY
/////////////////////////////////////////////////////////////////////////////
struct SelectBounds_t
{
int m_iStart;
int m_iEnd;
};
#define YYSTYPE SelectBounds_t
class SelectParser_t;
#include "bissphinxselect.h"
class SelectParser_t
{
public:
int GetToken ( YYSTYPE * lvalp );
void AddItem ( YYSTYPE * pExpr, ESphAggrFunc eAggrFunc=SPH_AGGR_NONE, YYSTYPE * pStart=NULL, YYSTYPE * pEnd=NULL );
void AddItem ( const char * pToken, YYSTYPE * pStart=NULL, YYSTYPE * pEnd=NULL );
void AliasLastItem ( YYSTYPE * pAlias );
void AddOption ( YYSTYPE * pOpt, YYSTYPE * pVal );
private:
void AutoAlias ( CSphQueryItem & tItem, YYSTYPE * pStart, YYSTYPE * pEnd );
bool IsTokenEqual ( YYSTYPE * pTok, const char * sRef );
public:
CSphString m_sParserError;
const char * m_pLastTokenStart;
const char * m_pStart;
const char * m_pCur;
CSphQuery * m_pQuery;
};
static int yylex ( YYSTYPE * lvalp, SelectParser_t * pParser )
{
return pParser->GetToken ( lvalp );
}
static void yyerror ( SelectParser_t * pParser, const char * sMessage )
{
pParser->m_sParserError.SetSprintf ( "P07: %s near '%s'", sMessage, pParser->m_pLastTokenStart );
}
#include "bissphinxselect.c"
int SelectParser_t::GetToken ( YYSTYPE * lvalp )
{
// skip whitespace, check eof
	while ( isspace ( (unsigned char)*m_pCur ) )
m_pCur++;
if ( !*m_pCur )
return 0;
// begin working that token
m_pLastTokenStart = m_pCur;
lvalp->m_iStart = int ( m_pCur-m_pStart );
// check for constant
	if ( isdigit ( (unsigned char)*m_pCur ) )
{
char * pEnd = NULL;
double VARIABLE_IS_NOT_USED fDummy = strtod ( m_pCur, &pEnd );
m_pCur = pEnd;
lvalp->m_iEnd = int ( m_pCur-m_pStart );
return SEL_TOKEN;
}
// check for token
	if ( sphIsAttr ( m_pCur[0] ) || ( m_pCur[0]=='@' && sphIsAttr ( m_pCur[1] ) && !isdigit ( (unsigned char)m_pCur[1] ) ) )
{
m_pCur++;
while ( sphIsAttr ( *m_pCur ) ) m_pCur++;
lvalp->m_iEnd = int ( m_pCur-m_pStart );
#define LOC_CHECK(_str,_len,_ret) \
if ( lvalp->m_iEnd==_len+lvalp->m_iStart && strncasecmp ( m_pStart+lvalp->m_iStart, _str, _len )==0 ) return _ret;
LOC_CHECK ( "ID", 2, SEL_ID );
LOC_CHECK ( "AS", 2, SEL_AS );
LOC_CHECK ( "OR", 2, TOK_OR );
LOC_CHECK ( "AND", 3, TOK_AND );
LOC_CHECK ( "NOT", 3, TOK_NOT );
LOC_CHECK ( "DIV", 3, TOK_DIV );
LOC_CHECK ( "MOD", 3, TOK_MOD );
LOC_CHECK ( "AVG", 3, SEL_AVG );
LOC_CHECK ( "MIN", 3, SEL_MIN );
LOC_CHECK ( "MAX", 3, SEL_MAX );
LOC_CHECK ( "SUM", 3, SEL_SUM );
LOC_CHECK ( "GROUP_CONCAT", 12, SEL_GROUP_CONCAT );
LOC_CHECK ( "GROUPBY", 7, SEL_GROUPBY );
LOC_CHECK ( "COUNT", 5, SEL_COUNT );
LOC_CHECK ( "DISTINCT", 8, SEL_DISTINCT );
LOC_CHECK ( "WEIGHT", 6, SEL_WEIGHT );
LOC_CHECK ( "OPTION", 6, SEL_OPTION );
LOC_CHECK ( "IS", 2, TOK_IS );
LOC_CHECK ( "NULL", 4, TOK_NULL );
LOC_CHECK ( "FOR", 3, TOK_FOR );
LOC_CHECK ( "IN", 2, TOK_FUNC_IN );
LOC_CHECK ( "RAND", 4, TOK_FUNC_RAND );
#undef LOC_CHECK
return SEL_TOKEN;
}
	// check for comparison operators and quoted string constants
lvalp->m_iEnd = 1+lvalp->m_iStart;
switch ( *m_pCur )
{
case '<':
m_pCur++;
if ( *m_pCur=='>' ) { m_pCur++; lvalp->m_iEnd++; return TOK_NE; }
if ( *m_pCur=='=' ) { m_pCur++; lvalp->m_iEnd++; return TOK_LTE; }
return '<';
case '>':
m_pCur++;
if ( *m_pCur=='=' ) { m_pCur++; lvalp->m_iEnd++; return TOK_GTE; }
return '>';
case '=':
m_pCur++;
if ( *m_pCur=='=' ) { m_pCur++; lvalp->m_iEnd++; }
return TOK_EQ;
case '\'':
{
const char cEnd = *m_pCur;
for ( const char * s = m_pCur+1; *s; s++ )
{
if ( *s==cEnd && s-1>=m_pCur && *(s-1)!='\\' )
{
m_pCur = s+1;
return TOK_CONST_STRING;
}
}
return -1;
}
}
// check for comment begin/end
if ( m_pCur[0]=='/' && m_pCur[1]=='*' )
{
m_pCur += 2;
lvalp->m_iEnd += 1;
return SEL_COMMENT_OPEN;
}
if ( m_pCur[0]=='*' && m_pCur[1]=='/' )
{
m_pCur += 2;
lvalp->m_iEnd += 1;
return SEL_COMMENT_CLOSE;
}
// return char as a token
return *m_pCur++;
}
void SelectParser_t::AutoAlias ( CSphQueryItem & tItem, YYSTYPE * pStart, YYSTYPE * pEnd )
{
if ( pStart && pEnd )
{
tItem.m_sAlias.SetBinary ( m_pStart + pStart->m_iStart, pEnd->m_iEnd - pStart->m_iStart );
sphColumnToLowercase ( const_cast<char *>( tItem.m_sAlias.cstr() ) ); // as in SqlParser_c
} else
tItem.m_sAlias = tItem.m_sExpr;
}
void SelectParser_t::AddItem ( YYSTYPE * pExpr, ESphAggrFunc eAggrFunc, YYSTYPE * pStart, YYSTYPE * pEnd )
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Add();
tItem.m_sExpr.SetBinary ( m_pStart + pExpr->m_iStart, pExpr->m_iEnd - pExpr->m_iStart );
sphColumnToLowercase ( const_cast<char *>( tItem.m_sExpr.cstr() ) );
tItem.m_eAggrFunc = eAggrFunc;
AutoAlias ( tItem, pStart, pEnd );
}
void SelectParser_t::AddItem ( const char * pToken, YYSTYPE * pStart, YYSTYPE * pEnd )
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Add();
tItem.m_sExpr = pToken;
tItem.m_eAggrFunc = SPH_AGGR_NONE;
sphColumnToLowercase ( const_cast<char *>( tItem.m_sExpr.cstr() ) );
AutoAlias ( tItem, pStart, pEnd );
}
void SelectParser_t::AliasLastItem ( YYSTYPE * pAlias )
{
if ( pAlias )
{
CSphQueryItem & tItem = m_pQuery->m_dItems.Last();
tItem.m_sAlias.SetBinary ( m_pStart + pAlias->m_iStart, pAlias->m_iEnd - pAlias->m_iStart );
tItem.m_sAlias.ToLower();
}
}
bool SelectParser_t::IsTokenEqual ( YYSTYPE * pTok, const char * sRef )
{
auto iLen = (int) strlen(sRef);
if ( iLen!=( pTok->m_iEnd - pTok->m_iStart ) )
return false;
return strncasecmp ( m_pStart + pTok->m_iStart, sRef, iLen )==0;
}
void SelectParser_t::AddOption ( YYSTYPE * pOpt, YYSTYPE * pVal )
{
if ( IsTokenEqual ( pOpt, "sort_method" ) )
{
if ( IsTokenEqual ( pVal, "kbuffer" ) )
m_pQuery->m_bSortKbuffer = true;
} else if ( IsTokenEqual ( pOpt, "max_predicted_time" ) )
{
char szNumber[256];
int iLen = pVal->m_iEnd-pVal->m_iStart;
assert ( iLen < (int)sizeof(szNumber) );
		strncpy ( szNumber, m_pStart+pVal->m_iStart, iLen );
		szNumber[iLen] = '\0'; // strncpy does not null-terminate when the source is longer than iLen
int64_t iMaxPredicted = strtoull ( szNumber, NULL, 10 );
m_pQuery->m_iMaxPredictedMsec = int(iMaxPredicted > INT_MAX ? INT_MAX : iMaxPredicted );
}
}
bool ParseSelectList ( CSphString & sError, CSphQuery & tQuery )
{
tQuery.m_dItems.Reset();
if ( tQuery.m_sSelect.IsEmpty() )
return true; // empty is ok; will just return everything
SelectParser_t tParser;
tParser.m_pStart = tQuery.m_sSelect.cstr();
tParser.m_pCur = tParser.m_pStart;
tParser.m_pQuery = &tQuery;
yyparse ( &tParser );
sError = tParser.m_sParserError;
return sError.IsEmpty ();
}
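// hypothetical usage sketch (caller code assumed, not taken from the sources):
//   CSphQuery tQuery;
//   tQuery.m_sSelect = "id, price*2 AS p2";
//   CSphString sError;
//   if ( !ParseSelectList ( sError, tQuery ) )
//       sphWarning ( "select list: %s", sError.cstr() );
// combine the index-level expand_keywords option with the per-query override, then
// drop the expansion bits (star/exact/morph_none) that the index settings can't support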
int ExpandKeywords ( int iIndexOpt, QueryOption_e eQueryOpt, const CSphIndexSettings & tSettings, bool bWordDict )
{
if ( tSettings.m_iMinInfixLen<=0 && tSettings.GetMinPrefixLen ( bWordDict )<=0 && !tSettings.m_bIndexExactWords )
return KWE_DISABLED;
int iOpt = KWE_DISABLED;
if ( eQueryOpt==QUERY_OPT_DEFAULT )
iOpt = iIndexOpt;
else if ( eQueryOpt==QUERY_OPT_MORPH_NONE )
iOpt = KWE_MORPH_NONE;
else
iOpt = ( eQueryOpt==QUERY_OPT_ENABLED ? KWE_ENABLED : KWE_DISABLED );
if ( ( iOpt & KWE_STAR )==KWE_STAR && tSettings.m_iMinInfixLen<=0 && tSettings.GetMinPrefixLen ( bWordDict )<=0 )
iOpt ^= KWE_STAR;
if ( ( iOpt & KWE_EXACT )==KWE_EXACT && !tSettings.m_bIndexExactWords )
iOpt ^= KWE_EXACT;
if ( ( iOpt & KWE_MORPH_NONE )==KWE_MORPH_NONE && !tSettings.m_bIndexExactWords )
iOpt ^= KWE_MORPH_NONE;
return iOpt;
}
/////////////////////////////////////////////////////////////////////////////
// QUERY STATS
/////////////////////////////////////////////////////////////////////////////
void CSphQueryStats::Add ( const CSphQueryStats & tStats )
{
m_iFetchedDocs += tStats.m_iFetchedDocs;
m_iFetchedHits += tStats.m_iFetchedHits;
m_iSkips += tStats.m_iSkips;
}
/////////////////////////////////////////////////////////////////////////////
// SCHEMAS
/////////////////////////////////////////////////////////////////////////////
/// make string lowercase but keep case of JSON.field
void sphColumnToLowercase ( char * sVal )
{
if ( !sVal || !*sVal )
return;
// make all chars lowercase but only prior to '.', ',', and '[' delimiters
// leave quoted values unchanged
for ( bool bQuoted=false; *sVal && *sVal!='.' && *sVal!=',' && *sVal!='['; sVal++ )
{
if ( !bQuoted )
*sVal = (char) tolower ( *sVal );
if ( *sVal=='\'' )
bQuoted = !bQuoted;
}
}
CSphMultiQueryArgs::CSphMultiQueryArgs ( int iIndexWeight )
: m_iIndexWeight ( iIndexWeight )
{
assert ( iIndexWeight>0 );
}
/////////////////////////////////////////////////////////////////////////////
// INDEX
/////////////////////////////////////////////////////////////////////////////
CSphIndex::CSphIndex ( CSphString sIndexName, CSphString sFileBase )
: IndexFileBase_c { sFileBase }
, m_tSchema { std::move ( sFileBase ) }
, m_sIndexName ( std::move ( sIndexName ) )
{
m_iIndexId = GetIndexUid();
m_tMutableSettings = MutableIndexSettings_c::GetDefaults();
}
CSphIndex::~CSphIndex ()
{
QcacheDeleteIndex ( m_iIndexId );
SkipCache::DeleteAll ( m_iIndexId );
}
void CSphIndex::SetInplaceSettings ( int iHitGap, float fRelocFactor, float fWriteFactor ) // fixme! build only
{
m_iHitGap = iHitGap;
m_fRelocFactor = fRelocFactor;
m_fWriteFactor = fWriteFactor;
m_bInplaceSettings = true;
}
void CSphIndex::SetFieldFilter ( std::unique_ptr<ISphFieldFilter> pFieldFilter )
{
m_pFieldFilter = std::move ( pFieldFilter );
}
void CSphIndex::SetTokenizer ( TokenizerRefPtr_c pTokenizer )
{
m_pTokenizer = std::move ( pTokenizer );
}
void CSphIndex::SetupQueryTokenizer()
{
bool bWordDict = m_pDict->GetSettings().m_bWordDict;
// create and setup a master copy of query time tokenizer
// that we can then use to create lightweight clones
m_pQueryTokenizer = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, false );
m_pQueryTokenizerJson = sphCloneAndSetupQueryTokenizer ( m_pTokenizer, IsStarDict ( bWordDict ), m_tSettings.m_bIndexExactWords, true );
}
void CSphIndex::PostSetup()
{
SetupQueryTokenizer();
const CSphDictSettings & tDictSettings = m_pDict->GetSettings();
if ( !ParseMorphFields ( tDictSettings.m_sMorphology, tDictSettings.m_sMorphFields, m_tSchema.GetFields(), m_tMorphFields, m_sLastError ) )
sphWarning ( "table '%s': %s", GetName(), m_sLastError.cstr() );
}
TokenizerRefPtr_c CSphIndex::GetTokenizer() const
{
return m_pTokenizer;
}
TokenizerRefPtr_c CSphIndex::GetQueryTokenizer() const
{
return m_pQueryTokenizer;
}
TokenizerRefPtr_c& CSphIndex::ModifyTokenizer ()
{
return m_pTokenizer;
}
void CSphIndex::SetDictionary ( DictRefPtr_c pDict )
{
m_pDict = std::move ( pDict );
}
DictRefPtr_c CSphIndex::GetDictionary() const
{
return m_pDict;
}
void CSphIndex::Setup ( const CSphIndexSettings & tSettings )
{
m_tSettings = tSettings;
}
void CSphIndex::SetCacheSize ( int iMaxCachedDocs, int iMaxCachedHits )
{
m_iMaxCachedDocs = iMaxCachedDocs;
m_iMaxCachedHits = iMaxCachedHits;
}
float CSphIndex::GetGlobalIDF ( const CSphString & sWord, int64_t iDocsLocal, bool bPlainIDF ) const
{
auto pIDFer = sph::GetIDFer ( m_sGlobalIDFPath );
if ( !pIDFer )
return 0.0;
return pIDFer->GetIDF ( sWord, iDocsLocal, bPlainIDF );
}
bool CSphIndex::HasGlobalIDF() const
{
return ( !m_sGlobalIDFPath.IsEmpty() && sph::GetIDFer ( m_sGlobalIDFPath ) );
}
int CSphIndex::UpdateAttributes ( AttrUpdateSharedPtr_t pUpd, bool & bCritical, CSphString & sError, CSphString & sWarning )
{
AttrUpdateInc_t tUpdInc { std::move (pUpd) };
return UpdateAttributes ( tUpdInc, bCritical, sError, sWarning );
}
int CSphIndex::UpdateAttributes ( AttrUpdateInc_t& tUpd, bool& bCritical, CSphString& sError, CSphString& sWarning )
{
return CheckThenUpdateAttributes ( tUpd, bCritical, sError, sWarning );
}
CSphVector<SphAttr_t> CSphIndex::BuildDocList () const
{
TlsMsg::ResetErr(); // reset error
return {};
}
void CSphIndex::GetFieldFilterSettings ( CSphFieldFilterSettings & tSettings ) const
{
if ( m_pFieldFilter )
m_pFieldFilter->GetSettings ( tSettings );
}
void CSphIndex::SetMutableSettings ( const MutableIndexSettings_c & tSettings )
{
m_tMutableSettings = tSettings;
}
static bool DetectNonClonableSorters ( const CSphQuery & tQuery )
{
if ( !tQuery.m_sGroupDistinct.IsEmpty() )
return true;
// FIXME: also need to handle
// 1. Stateful UDFs
// 2. Update/delete queue
return false;
}
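// detect queries which can be served by precalculated sorters (count(*) or count(distinct)
// taken from secondary indexes or index stats) instead of an actual scan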
static bool DetectPrecalcSorters ( const CSphQuery & tQuery, const ISphSchema & tIndexSchema, bool bHasSI )
{
if ( tQuery.m_dItems.any_of ( []( auto & tItem ){ return tItem.m_eAggrFunc!=SPH_AGGR_NONE; } ) )
return false;
if ( !HasImplicitGrouping(tQuery) )
return false;
if ( !tQuery.m_sQuery.IsEmpty() )
return false;
if ( !tQuery.m_sKNNAttr.IsEmpty() )
return false;
bool bDistinct = !tQuery.m_sGroupDistinct.IsEmpty();
if ( bHasSI )
{
// check for count distinct precalc
if ( bDistinct && tQuery.m_dFilters.IsEmpty() )
return true;
// check for count(*) precalc w/one filter
if ( !bDistinct && tQuery.m_dFilters.GetLength()==1 )
{
if ( tIndexSchema.GetAttr ( tQuery.m_dFilters[0].m_sAttrName.cstr() ) )
return true;
}
}
// check for count(*) w/o filters
if ( !bDistinct && tQuery.m_dFilters.IsEmpty() )
return true;
return false;
}
bool CSphIndex::MustRunInSingleThread ( const VecTraits_T<const CSphQuery> & dQueries, bool bHasSI, const VecTraits_T<int64_t> & dMaxCountDistinct, bool & bForceSingleThread ) const
{
ARRAY_FOREACH ( i, dQueries )
{
auto & tQuery = dQueries[i];
// check for potential non-clonable sorters (we don't have actual sorters at this stage)
if ( DetectNonClonableSorters(tQuery) )
return true;
if ( DetectPrecalcSorters ( tQuery, m_tSchema, bHasSI ) )
return true;
// at this point we are trying to decide how many threads this index gets
		// we have not adjusted max_matches yet (that is done later, to achieve max grouping accuracy)
		// but if increasing max_matches would be enough to achieve max accuracy, there's no need to turn off multithreading
		// that's why we now try to guess whether increasing max_matches is enough
int iMaxCountDistinct = dMaxCountDistinct[i];
bool bAccurateAggregation = tQuery.m_bExplicitAccurateAggregation ? tQuery.m_bAccurateAggregation : GetAccurateAggregationDefault();
if ( bAccurateAggregation && !tQuery.m_sGroupBy.IsEmpty() )
{
int iGroupby = GetAliasedAttrIndex ( tQuery.m_sGroupBy, tQuery, m_tSchema );
if ( iGroupby>0 )
{
if ( iMaxCountDistinct==-1 )
{
CSphString sModifiedAttr;
iMaxCountDistinct = GetCountDistinct ( tQuery.m_sGroupBy, sModifiedAttr );
}
if ( iMaxCountDistinct==-1 )
{
bForceSingleThread = true;
					return true; // no info on count distinct; disable pseudo sharding
}
else
{
if ( tQuery.m_bExplicitMaxMatches && iMaxCountDistinct > tQuery.m_iMaxMatches )
{
bForceSingleThread = true;
						return true; // user-set max_matches can't be changed and is too low; disable pseudo sharding
}
if ( iMaxCountDistinct > tQuery.m_iMaxMatchThresh )
{
bForceSingleThread = true;
						return true; // max_matches can't be increased that far; disable pseudo sharding
}
}
}
}
}
return GetStats().m_iTotalDocuments<=g_iPseudoShardingThresh;
}
//////////////////////////////////////////////////////////////////////////
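// convert pooled blob/columnar attributes in sorter matches into standalone ptr attributes,
// so that the results stay valid after the index data (blob pool, columnar reader) goes away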
static void PooledAttrsToPtrAttrs ( const VecTraits_T<ISphMatchSorter *> & dSorters, const BYTE * pBlobPool, columnar::Columnar_i * pColumnar, bool bFinalizeMatches, QueryProfile_c * pProfile, bool bModifySorterSchemas )
{
if ( !bModifySorterSchemas )
return;
CSphScopedProfile tProfile ( pProfile, SPH_QSTATE_DYNAMIC );
dSorters.Apply ( [&] ( ISphMatchSorter * p )
{
if ( p )
p->TransformPooled2StandalonePtrs ( [pBlobPool] ( const CSphMatch * ) { return pBlobPool; }, [pColumnar] ( const CSphMatch * ) { return pColumnar; }, bFinalizeMatches );
});
}
std::unique_ptr<CSphIndex> sphCreateIndexPhrase ( CSphString sIndexName, CSphString sFilename )
{
return std::make_unique<CSphIndex_VLN> ( std::move ( sIndexName ), std::move ( sFilename ) );
}
//////////////////////////////////////////////////////////////////////////
CSphIndex_VLN::CSphIndex_VLN ( CSphString sIndexName, CSphString sFilename )
: CSphIndex ( std::move ( sIndexName ), std::move ( sFilename ) )
, m_iLockFD ( -1 )
, m_dFieldLens ( SPH_MAX_FIELDS )
{
m_iDocinfo = 0;
m_iDocinfoIndex = 0;
m_pDocinfoIndex = nullptr;
m_uVersion = INDEX_FORMAT_VERSION;
m_bPassedRead = false;
m_bPassedAlloc = false;
m_bIsEmpty = true;
m_bDebugCheck = false;
m_uAttrsStatus = 0;
m_iMinMaxIndex = 0;
m_dFieldLens.ZeroVec();
}
CSphIndex_VLN::~CSphIndex_VLN ()
{
SafeDelete ( m_pHistograms );
Unlock();
}
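// reads docids in ascending order through an externally sorted vector of indexes,
// while remembering the original position of the docid last returned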
class DocIdIndexReader_c
{
public:
DocIdIndexReader_c ( const VecTraits_T<int>& dIdx, const VecTraits_T<DocID_t>& dDocids )
: m_pCur ( dIdx.begin() )
, m_pEnd ( dIdx.end() )
, m_dDocids ( dDocids )
{}
inline bool ReadDocID ( DocID_t & tDocID )
{
if ( m_pCur>=m_pEnd )
return false;
tDocID = m_dDocids[*m_pCur];
++m_pCur;
return true;
}
int GetIndex() const
{
return *( m_pCur-1 );
}
static inline void HintDocID ( DocID_t ) {}
private:
const int * m_pCur;
const int * m_pEnd;
const VecTraits_T<DocID_t> & m_dDocids;
};
// collect the rows which will be updated in this index
RowsToUpdateData_t CSphIndex_VLN::Update_CollectRowPtrs ( const UpdateContext_t & tCtx )
{
TRACE_CORO ( "sph", "CSphIndex_VLN::Update_CollectRowPtrs" );
RowsToUpdateData_t dRowsToUpdate;
const auto & dDocids = tCtx.m_tUpd.m_pUpdate->m_dDocids;
// collect idxes of alive (not-yet-updated) rows
CSphVector<int> dSorted;
dSorted.Reserve ( dDocids.GetLength() - tCtx.m_tUpd.m_iAffected );
ARRAY_CONSTFOREACH (i, dDocids)
if ( !tCtx.m_tUpd.m_dUpdated.BitGet ( i ) )
dSorted.Add ( i );
if ( dSorted.IsEmpty () )
return dRowsToUpdate;
dSorted.Sort ( Lesser ( [&dDocids] ( int a, int b ) { return dDocids[a]<dDocids[b]; } ) );
DocIdIndexReader_c tSortedReader ( dSorted, dDocids );
LookupReaderIterator_c tLookupReader ( m_tDocidLookup.GetReadPtr() );
Intersect ( tLookupReader, tSortedReader, [&dRowsToUpdate, this] ( RowID_t tRowID, DocID_t, DocIdIndexReader_c& tSortedReader )
{
if ( m_tDeadRowMap.IsSet ( tRowID ) )
return;
auto& dUpd = dRowsToUpdate.Add();
dUpd.m_tRow = tRowID;
dUpd.m_iIdx = tSortedReader.GetIndex();
assert ( dUpd.m_tRow != INVALID_ROWID );
} );
return dRowsToUpdate;
}
// We fill the docinfo ptr for actual rows and move out the non-actual ones (those which don't point to an existing document).
// Note: it actually changes (rearranges) the rows!
RowsToUpdate_t CSphIndex_VLN::Update_PrepareGatheredRowPtrs ( RowsToUpdate_t & dWRows, const VecTraits_T<DocID_t>& dDocids )
{
	RowsToUpdate_t & dRows = dWRows; // the alias emphasizes that we CHANGE the contents of dWRows; that's why it is passed by non-const reference.
dRows.Sort ( Lesser ( [&dDocids] ( auto& a, auto& b ) { return dDocids[a.m_iIdx]<dDocids[b.m_iIdx]; } ) );
LookupReaderIterator_c tLookupReader ( m_tDocidLookup.GetReadPtr() );
RowID_t tRowID = INVALID_ROWID;
DocID_t tDocID = 0;
bool bHaveDocs = tLookupReader.Read ( tDocID, tRowID );
bool bHaveDocsToUpdate = !dRows.IsEmpty();
DocID_t tDocIDPrepared = bHaveDocsToUpdate ? dDocids[dRows[0].m_iIdx] : 0;
int iReadIdx = 0;
int iWriteIdx = 0;
while ( bHaveDocs && bHaveDocsToUpdate )
{
if ( tDocID < tDocIDPrepared )
{
tLookupReader.HintDocID ( tDocIDPrepared );
bHaveDocs = tLookupReader.Read ( tDocID, tRowID );
continue;
} else if ( tDocID == tDocIDPrepared )
{
dRows[iWriteIdx].m_tRow = tRowID;
assert ( tRowID != INVALID_ROWID );
Swap ( dRows[iWriteIdx].m_iIdx, dRows[iReadIdx].m_iIdx );
bHaveDocs = tLookupReader.Read ( tDocID, tRowID );
++iWriteIdx;
}
++iReadIdx;
bHaveDocsToUpdate = iReadIdx < dRows.GetLength();
if ( bHaveDocsToUpdate )
tDocIDPrepared = dDocids[dRows[iReadIdx].m_iIdx];
}
return dWRows.Slice ( 0, iWriteIdx );
}
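// write an updated blob row: overwrite in place when the new blob fits into the old one,
// otherwise append it to the tail of the pool (growing the .spb mapping if needed)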
bool CSphIndex_VLN::Update_WriteBlobRow ( UpdateContext_t & tCtx, RowID_t tRowID, ByteBlob_t tBlob, int nBlobAttrs, const CSphAttrLocator & tBlobRowLoc, bool & bCritical, CSphString & sError )
{
auto pDocinfo = tCtx.GetDocinfo ( tRowID );
BYTE * pExistingBlob = m_tBlobAttrs.GetWritePtr() + sphGetRowAttr ( pDocinfo, tBlobRowLoc );
DWORD uExistingBlobLen = sphGetBlobTotalLen ( pExistingBlob, nBlobAttrs );
bCritical = false;
// overwrite old record (because we have write-lock)
if ( (DWORD)tBlob.second<=uExistingBlobLen )
{
memcpy ( pExistingBlob, tBlob.first, tBlob.second );
return true;
}
BYTE * pOldBlobPool = m_tBlobAttrs.GetWritePtr();
SphOffset_t tBlobSpaceUsed = *(SphOffset_t*)pOldBlobPool;
SphOffset_t tSpaceLeft = m_tBlobAttrs.GetLengthBytes()-tBlobSpaceUsed;
// not great: we have to resize our .spb file and create new memory maps
if ( (SphOffset_t)tBlob.second > tSpaceLeft )
{
SphOffset_t tSizeDelta = Max ( (SphOffset_t)tBlob.second-tSpaceLeft, m_tSettings.m_tBlobUpdateSpace );
CSphString sWarning;
size_t tOldSize = m_tBlobAttrs.GetLengthBytes();
if ( !m_tBlobAttrs.Resize ( tOldSize + tSizeDelta, sWarning, sError ) )
{
// try to map again, using old size
if ( !m_tBlobAttrs.Resize ( tOldSize, sWarning, sError ) )
bCritical = true; // real bad, index unusable
sError = "unable to resize .SPB file";
return false;
}
		// update blob pool ptr since it might have changed after the resize
tCtx.m_pBlobPool = m_tBlobAttrs.GetWritePtr();
}
BYTE * pEnd = m_tBlobAttrs.GetWritePtr() + tBlobSpaceUsed;
memcpy ( pEnd, tBlob.first, tBlob.second );
sphSetRowAttr ( pDocinfo, tBlobRowLoc, tBlobSpaceUsed );
tBlobSpaceUsed += tBlob.second;
*(SphOffset_t*)m_tBlobAttrs.GetWritePtr() = tBlobSpaceUsed;
return true;
}
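// widen the per-block and whole-index min/max ranges for the plain (non-blob) attributes
// touched by the update, so that block-level early rejection stays correct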
void CSphIndex_VLN::Update_MinMax ( const RowsToUpdate_t& dRows, const UpdateContext_t & tCtx )
{
int iRowStride = tCtx.m_tSchema.GetRowSize();
for ( const auto & tRow : dRows )
{
auto iBlock = (int64_t) tRow.m_tRow / DOCINFO_INDEX_FREQ;
DWORD * pBlockRanges = m_pDocinfoIndex + ( iBlock * iRowStride * 2 );
DWORD * pIndexRanges = m_pDocinfoIndex + ( m_iDocinfoIndex * iRowStride * 2 );
assert ( iBlock>=0 && iBlock<m_iDocinfoIndex );
const auto* pDocinfo = tCtx.GetDocinfo ( tRow.m_tRow );
ARRAY_CONSTFOREACH ( iCol, tCtx.m_tUpd.m_pUpdate->m_dAttributes )
{
const UpdatedAttribute_t & tUpdAttr = tCtx.m_dUpdatedAttrs[iCol];
if ( !tUpdAttr.m_bExisting )
continue;
const CSphAttrLocator & tLoc = tUpdAttr.m_tLocator;
if ( tLoc.IsBlobAttr() )
continue;
SphAttr_t uValue = sphGetRowAttr ( pDocinfo, tLoc );
// update block and index ranges
for ( int i=0; i<2; i++ )
{
DWORD * pBlock = i ? pBlockRanges : pIndexRanges;
SphAttr_t uMin = sphGetRowAttr ( pBlock, tLoc );
SphAttr_t uMax = sphGetRowAttr ( pBlock+iRowStride, tLoc );
				if ( tUpdAttr.m_eAttrType==SPH_ATTR_FLOAT ) // updating float ranges requires float comparison
{
float fValue = sphDW2F ( (DWORD) uValue );
float fMin = sphDW2F ( (DWORD) uMin );
float fMax = sphDW2F ( (DWORD) uMax );
if ( fValue<fMin )
sphSetRowAttr ( pBlock, tLoc, sphF2DW ( fValue ) );
if ( fValue>fMax )
sphSetRowAttr ( pBlock+iRowStride, tLoc, sphF2DW ( fValue ) );
} else // update usual integers
{
if ( uValue<uMin )
sphSetRowAttr ( pBlock, tLoc, uValue );
if ( uValue>uMax )
sphSetRowAttr ( pBlock+iRowStride, tLoc, uValue );
}
}
}
}
}
// Collect updated docs and store them in the vec of
// postponed updates (there may be more than one update during the operation)
void CSphIndex_VLN::MaybeAddPostponedUpdate ( RowsToUpdateData_t& dRows, const UpdateContext_t& tCtx )
{
TRACE_CORO ( "sph", "CSphIndex_VLN::MaybeAddPostponedUpdate" );
if ( !m_bAttrsBusy.load ( std::memory_order_acquire ) )
return;
auto& tUpd = tCtx.m_tUpd;
auto& tNewUpdate = m_dPostponedUpdates.Add();
tNewUpdate.m_pUpdate = MakeReusableUpdate ( tUpd.m_pUpdate );
tNewUpdate.m_dRowsToUpdate.SwapData ( dRows );
}
bool CSphIndex_VLN::DoUpdateAttributes ( const RowsToUpdate_t& dRows, UpdateContext_t& tCtx, bool& bCritical, CSphString& sError )
{
TRACE_CORO ( "sph", "CSphIndex_VLN::DoUpdateAttributes" );
if ( dRows.IsEmpty() )
return true;
if ( !Update_CheckAttributes ( *tCtx.m_tUpd.m_pUpdate, tCtx.m_tSchema, sError ) )
return false;
tCtx.m_pHistograms = m_pHistograms;
tCtx.m_pBlobPool = m_tBlobAttrs.GetWritePtr();
tCtx.m_pAttrPool = m_tAttr.GetWritePtr ();
tCtx.PrepareListOfUpdatedAttributes ( sError );
if ( !Update_UpdateAttributes ( dRows, tCtx, bCritical, sError ) )
return false;
Update_MinMax ( dRows, tCtx );
return true;
}
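// serialize an attribute update into the binlog: the op byte, the attribute list, then the POD payload vectors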
void CommitUpdateAttributes ( int64_t * pTID, const char * szName, const CSphAttrUpdate & tUpd )
{
CSphString sError;
Binlog::Commit ( pTID, szName, sError, [&tUpd] ( Writer_i & tWriter ) {
// my user op
tWriter.PutByte ( Binlog::UPDATE_ATTRS );
// update data
tWriter.ZipOffset ( tUpd.m_dAttributes.GetLength () );
for ( const auto & i: tUpd.m_dAttributes )
{
tWriter.PutZString ( i.m_sName );
tWriter.ZipOffset ( i.m_eType );
}
// POD vectors
Binlog::SaveVector ( tWriter, tUpd.m_dPool );
Binlog::SaveVector ( tWriter, tUpd.m_dDocids );
Binlog::SaveVector ( tWriter, tUpd.m_dRowOffset );
Binlog::SaveVector ( tWriter, tUpd.m_dBlobs );
} );
}
Binlog::CheckTnxResult_t CSphIndex::ReplayUpdate ( CSphReader & tReader, CSphString & sError, Binlog::CheckTxn_fn && fnCanContinue )
{
// load transaction data
AttrUpdateSharedPtr_t pUpd { new CSphAttrUpdate };
auto & tUpd = *pUpd;
tUpd.m_bIgnoreNonexistent = true;
int iAttrs = (int) tReader.UnzipOffset ();
tUpd.m_dAttributes.Resize ( iAttrs ); // FIXME! sanity check
for ( auto & i: tUpd.m_dAttributes )
{
i.m_sName = tReader.GetZString ();
i.m_eType = (ESphAttr) tReader.UnzipOffset (); // safe, we'll crc check later
}
if ( tReader.GetErrorFlag () )
return {};
if ( !Binlog::LoadVector ( tReader, tUpd.m_dPool ) ) return {};
if ( !Binlog::LoadVector ( tReader, tUpd.m_dDocids ) ) return {};
if ( !Binlog::LoadVector ( tReader, tUpd.m_dRowOffset ) ) return {};
if ( !Binlog::LoadVector ( tReader, tUpd.m_dBlobs ) ) return {};
Binlog::CheckTnxResult_t tRes = fnCanContinue ();
if ( tRes.m_bValid && tRes.m_bApply )
{
CSphString sError, sWarning;
bool bCritical = false;
UpdateAttributes ( pUpd, bCritical, sError, sWarning ); // FIXME! check for errors
assert ( !bCritical ); // fixme! handle this
tRes.m_bApply = true;
}
return tRes;
}
Binlog::CheckTnxResult_t CSphIndex_VLN::ReplayTxn ( CSphReader & tReader, CSphString & sError, BYTE uOp, Binlog::CheckTxn_fn && fnCanContinue )
{
switch ( uOp )
{
case Binlog::UPDATE_ATTRS:
return ReplayUpdate ( tReader, sError, std::move ( fnCanContinue ) );
default:
assert ( false && "unknown op provided to replay" );
}
return {};
}
int CSphIndex_VLN::CheckThenUpdateAttributes ( AttrUpdateInc_t& tUpd, bool& bCritical, CSphString& sError, CSphString& sWarning )
{
TRACE_CORO ( "sph", "CSphIndex_VLN::CheckThenUpdateAttributes" );
assert ( tUpd.m_pUpdate->m_dRowOffset.IsEmpty() || tUpd.m_pUpdate->m_dDocids.GetLength()==tUpd.m_pUpdate->m_dRowOffset.GetLength() );
	// check if there is anything to update at all
if ( !m_iDocinfo || tUpd.m_pUpdate->m_dDocids.IsEmpty() )
return 0;
UpdateContext_t tCtx ( tUpd, m_tSchema );
int iUpdated = tUpd.m_iAffected;
auto dRowsToUpdate = Update_CollectRowPtrs ( tCtx );
if ( !DoUpdateAttributes ( dRowsToUpdate, tCtx, bCritical, sError ))
return -1;
MaybeAddPostponedUpdate ( dRowsToUpdate, tCtx );
if ( tCtx.m_uUpdateMask && m_bBinlog )
CommitUpdateAttributes ( &m_iTID, GetName(), *tUpd.m_pUpdate );
m_uAttrsStatus |= tCtx.m_uUpdateMask; // FIXME! add lock/atomic?
if ( ( tCtx.m_uUpdateMask & IndexSegment_c::ATTRS_UPDATED ) || ( tCtx.m_uUpdateMask & IndexSegment_c::ATTRS_BLOB_UPDATED ) )
{
for ( const UpdatedAttribute_t & tAttr : tCtx.m_dUpdatedAttrs )
if ( tAttr.m_iSchemaAttr!=-1 )
m_tSI.ColumnUpdated ( m_tSchema.GetAttr ( tAttr.m_iSchemaAttr ).m_sName );
}
iUpdated = tUpd.m_iAffected - iUpdated;
if ( !tCtx.HandleJsonWarnings ( iUpdated, sWarning, sError ) )
return -1;
return iUpdated;
}
void CSphIndex_VLN::UpdateAttributesOffline ( VecTraits_T<PostponedUpdate_t> & dPostUpdates )
{
if ( dPostUpdates.IsEmpty () )
return;
CSphString sError;
bool bCritical;
for ( auto & tPostUpdate : dPostUpdates )
{
RowsToUpdate_t dRows = Update_PrepareGatheredRowPtrs ( tPostUpdate.m_dRowsToUpdate, tPostUpdate.m_pUpdate->m_dDocids );
AttrUpdateInc_t tUpdInc { tPostUpdate.m_pUpdate }; // don't move, keep update (need twice when split chunks)
UpdateContext_t tCtx ( tUpdInc, m_tSchema );
if ( !DoUpdateAttributes ( dRows, tCtx, bCritical, sError ) )
{
sphWarning ("UpdateAttributesOffline: %s", sError.cstr() );
break;
}
m_uAttrsStatus |= tCtx.m_uUpdateMask; // FIXME! add lock/atomic?
}
}
// safely rotate an index file: move the current file to .old, promote the .tmp version, and roll back on failure
bool CSphIndex_VLN::JuggleFile ( ESphExt eExt, CSphString & sError, bool bNeedSrc, bool bNeedDst ) const
{
CSphString sExt = GetFilename ( eExt );
CSphString sExtNew = GetTmpFilename ( eExt );
CSphString sExtOld = SphSprintf ( "%s.old", sExt.cstr() );
if ( sph::rename ( sExt.cstr(), sExtOld.cstr() ) )
{
if ( bNeedSrc )
{
sError.SetSprintf ( "rename '%s' to '%s' failed: %s", sExt.cstr(), sExtOld.cstr(), strerror(errno) );
return false;
}
}
if ( sph::rename ( sExtNew.cstr(), sExt.cstr() ) )
{
if ( bNeedDst )
{
if ( bNeedSrc && !sph::rename ( sExtOld.cstr(), sExt.cstr() ) )
{
// rollback failed too!
sError.SetSprintf ( "rollback rename to '%s' failed: %s; TABLE UNUSABLE; FIX FILE NAMES MANUALLY", sExt.cstr(), strerror(errno) );
} else
{
// rollback went ok
sError.SetSprintf ( "rename '%s' to '%s' failed: %s", sExtNew.cstr(), sExt.cstr(), strerror(errno) );
}
return false;
}
}
// all done
::unlink ( sExtOld.cstr() );
return true;
}
bool CSphIndex_VLN::SaveAttributes ( CSphString & sError ) const
{
if ( !m_uAttrsStatus || !m_iDocinfo )
return true;
DWORD uAttrStatus = m_uAttrsStatus;
sphLogDebugvv ( "table '%s' attrs (%u) saving...", GetName(), uAttrStatus );
if ( uAttrStatus & IndexSegment_c::ATTRS_UPDATED )
{
if ( !m_tAttr.Flush ( true, sError ) )
return false;
if ( m_pHistograms && !m_pHistograms->Save ( GetFilename ( SPH_EXT_SPHI ), sError ) )
return false;
}
if ( uAttrStatus & IndexSegment_c::ATTRS_BLOB_UPDATED )
{
if ( !m_tBlobAttrs.Flush ( true, sError ) )
return false;
}
if ( uAttrStatus & IndexSegment_c::ATTRS_ROWMAP_UPDATED )
{
if ( !m_tDeadRowMap.Flush ( true, sError ) )
return false;
}
bool bAttrsUpdated = ( uAttrStatus & IndexSegment_c::ATTRS_UPDATED ) || ( uAttrStatus & IndexSegment_c::ATTRS_BLOB_UPDATED );
if ( bAttrsUpdated && !m_tSI.SaveMeta(sError) )
return false;
if ( m_bBinlog )
Binlog::NotifyIndexFlush ( m_iTID, GetName(), Binlog::NoShutdown, Binlog::NoSave );
if ( m_uAttrsStatus==uAttrStatus )
m_uAttrsStatus = 0;
sphLogDebugvv ( "table '%s' attrs (%u) saved", GetName(), m_uAttrsStatus );
return true;
}
DWORD CSphIndex_VLN::GetAttributeStatus () const
{
return m_uAttrsStatus;
}
//////////////////////////////////////////////////////////////////////////
struct CmpQueuedLookup_fn
{
static DocidRowidPair_t * m_pStorage;
static inline bool IsLess ( const int a, const int b )
{
if ( m_pStorage[a].m_tDocID==m_pStorage[b].m_tDocID )
return m_pStorage[a].m_tRowID < m_pStorage[b].m_tRowID;
return m_pStorage[a].m_tDocID < m_pStorage[b].m_tDocID;
}
};
DocidRowidPair_t * CmpQueuedLookup_fn::m_pStorage = nullptr;
bool CSphIndex_VLN::Alter_IsMinMax ( const CSphRowitem * pDocinfo, int iStride ) const
{
return pDocinfo-m_tAttr.GetReadPtr() >= m_iDocinfo*iStride;
}
bool CSphIndex_VLN::AddRemoveColumnarAttr ( bool bAddAttr, const CSphString & sAttrName, ESphAttr eAttrType, const ISphSchema & tOldSchema, const ISphSchema & tNewSchema, CSphString & sError )
{
BuildBufferSettings_t tSettings; // use default buffer settings
auto pBuilder = CreateColumnarBuilder ( tNewSchema, GetTmpFilename ( SPH_EXT_SPC ), tSettings.m_iBufferColumnar, sError );
if ( !pBuilder )
return false;
return Alter_AddRemoveColumnar ( bAddAttr, m_tSchema, tNewSchema, m_pColumnar.get(), pBuilder.get(), (DWORD)m_iDocinfo, GetName(), sError );
}
bool CSphIndex_VLN::AddRemoveAttribute ( bool bAddAttr, const AttrAddRemoveCtx_t & tCtx, CSphString & sError )
{
AttrEngine_e eAttrEngine = CombineEngines ( m_tSettings.m_eEngine, tCtx.m_eEngine );
AttrAddRemoveCtx_t tNewCtx = tCtx;
if ( eAttrEngine==AttrEngine_e::COLUMNAR )
tNewCtx.m_uFlags |= CSphColumnInfo::ATTR_COLUMNAR;
else
tNewCtx.m_uFlags &= ~( CSphColumnInfo::ATTR_COLUMNAR_HASHES | CSphColumnInfo::ATTR_STORED );
CSphSchema tNewSchema = m_tSchema;
if ( !Alter_AddRemoveFromSchema ( tNewSchema, tNewCtx, bAddAttr, sError ) )
return false;
int iNewStride = tNewSchema.GetRowSize();
int64_t iNewMinMaxIndex = m_iDocinfo * iNewStride;
BuildHeader_t tBuildHeader;
WriteHeader_t tWriteHeader;
PrepareHeaders ( tBuildHeader, tWriteHeader );
tBuildHeader.m_iMinMaxIndex = iNewMinMaxIndex;
tWriteHeader.m_pSchema = &tNewSchema;
// save the header
if ( !IndexBuildDone ( tBuildHeader, tWriteHeader, GetTmpFilename(SPH_EXT_SPH), sError ) )
return false;
// generate new .SPA, .SPB files
CSphWriter tSPAWriter;
CSphWriter tSPBWriter;
tSPAWriter.SetBufferSize ( 524288 );
tSPBWriter.SetBufferSize ( 524288 );
std::unique_ptr<WriteWrapper_c> pSPAWriteWrapper { CreateWriteWrapperDisk(tSPAWriter) };
std::unique_ptr<WriteWrapper_c> pSPBWriteWrapper { CreateWriteWrapperDisk(tSPBWriter) };
CSphString sSPAfile = GetTmpFilename ( SPH_EXT_SPA );
CSphString sSPBfile = GetTmpFilename ( SPH_EXT_SPB );
CSphString sSPHIfile = GetTmpFilename ( SPH_EXT_SPHI );
if ( !tSPAWriter.OpenFile ( sSPAfile, sError ) )
return false;
bool bHadBlobs = false;
for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
bHadBlobs |= sphIsBlobAttr ( m_tSchema.GetAttr(i) );
bool bHaveBlobs = false;
for ( int i = 0; i < tNewSchema.GetAttrsCount(); i++ )
bHaveBlobs |= sphIsBlobAttr ( tNewSchema.GetAttr(i) );
bool bBlob = sphIsBlobAttr ( tCtx.m_eType );
bool bBlobsModified = bBlob && ( bAddAttr || bHaveBlobs==bHadBlobs );
if ( bBlobsModified )
{
if ( !tSPBWriter.OpenFile ( sSPBfile, sError ) )
return false;
tSPBWriter.PutOffset(0);
}
if ( !tNewSchema.GetAttrsCount() )
{
sError = "table must have at least one attribute";
return false;
}
bool bColumnar = bAddAttr ? tNewSchema.GetAttr ( tCtx.m_sName.cstr() )->IsColumnar() : m_tSchema.GetAttr ( tCtx.m_sName.cstr() )->IsColumnar();
if ( bColumnar )
AddRemoveColumnarAttr ( bAddAttr, tCtx.m_sName, tCtx.m_eType, m_tSchema, tNewSchema, sError );
else
{
int64_t iTotalRows = m_iDocinfo + (m_iDocinfoIndex+1)*2;
Alter_AddRemoveRowwiseAttr ( m_tSchema, tNewSchema, m_tAttr.GetReadPtr(), (DWORD)iTotalRows, m_tBlobAttrs.GetReadPtr(), *pSPAWriteWrapper, *pSPBWriteWrapper, bAddAttr, tCtx.m_sName );
}
if ( m_pHistograms )
{
if ( bAddAttr )
{
std::unique_ptr<Histogram_i> pNewHistogram = CreateHistogram ( tCtx.m_sName, tCtx.m_eType );
if ( pNewHistogram )
{
for ( DWORD i = 0; i < m_iDocinfo; i++ )
pNewHistogram->Insert(0);
m_pHistograms->Add ( std::move ( pNewHistogram ) );
}
}
else
m_pHistograms->Remove ( tCtx.m_sName );
if ( !m_pHistograms->Save ( sSPHIfile, sError ) )
return false;
}
if ( !AddRemoveFromDocstore ( m_tSchema, tNewSchema, sError ) )
return false;
if ( tSPAWriter.IsError() )
{
sError.SetSprintf ( "error writing to %s", sSPAfile.cstr() );
return false;
}
tSPAWriter.CloseFile();
bool bHadColumnar = m_tSchema.HasColumnarAttrs();
bool bHaveColumnar = tNewSchema.HasColumnarAttrs();
bool bHadNonColumnar = m_tSchema.HasNonColumnarAttrs();
bool bHaveNonColumnar = tNewSchema.HasNonColumnarAttrs();
m_tAttr.Reset();
if ( bColumnar )
{
if ( !JuggleFile ( SPH_EXT_SPC, sError, bHadColumnar, bHaveColumnar ) )
return false;
if ( tNewSchema.HasColumnarAttrs() )
{
m_pColumnar = CreateColumnarStorageReader ( GetFilename ( SPH_EXT_SPC ), (DWORD)m_iDocinfo, sError );
if ( !m_pColumnar )
return false;
}
else
m_pColumnar.reset();
}
else
{
if ( !JuggleFile ( SPH_EXT_SPA, sError, bHadNonColumnar, bHaveNonColumnar ) )
return false;
}
if ( !JuggleFile ( SPH_EXT_SPH, sError ) )
return false;
if ( !JuggleFile ( SPH_EXT_SPHI, sError ) )
return false;
if ( bHaveNonColumnar && !m_tAttr.Setup ( GetFilename ( SPH_EXT_SPA ), sError, true ) )
return false;
if ( bBlob )
{
m_tBlobAttrs.Reset();
if ( bAddAttr || bHaveBlobs==bHadBlobs )
{
if ( tSPBWriter.IsError() )
{
sError.SetSprintf ( "error writing to %s", sSPBfile.cstr() );
return false;
}
SphOffset_t tPos = tSPBWriter.GetPos();
			// FIXME!!! make a single function out of this mess, as the order of operations matters here
			tSPBWriter.Flush(); // store the collected data, as SeekTo might discard the buffer collected so far
tSPBWriter.SeekTo ( 0 );
tSPBWriter.PutOffset ( tPos );
tSPBWriter.SeekTo ( tPos + m_tSettings.m_tBlobUpdateSpace, true );
tSPBWriter.CloseFile();
if ( !JuggleFile ( SPH_EXT_SPB, sError, bHadBlobs ) )
return false;
if ( !m_tBlobAttrs.Setup ( GetFilename ( SPH_EXT_SPB ), sError, true ) )
return false;
} else
::unlink ( GetFilename ( SPH_EXT_SPB ).cstr() );
}
m_tSchema = tNewSchema;
m_iMinMaxIndex = iNewMinMaxIndex;
m_pDocinfoIndex = m_tAttr.GetWritePtr() + m_iMinMaxIndex;
PrereadMapping ( GetName(), "attributes", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eAttr ), IsOndisk ( m_tMutableSettings.m_tFileAccess.m_eAttr ), m_tAttr );
if ( bBlobsModified )
PrereadMapping ( GetName(), "blob attributes", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eBlob ), IsOndisk ( m_tMutableSettings.m_tFileAccess.m_eBlob ), m_tBlobAttrs );
return true;
}
void CSphIndex_VLN::PrepareHeaders ( BuildHeader_t & tBuildHeader, WriteHeader_t & tWriteHeader, bool bCopyDictHeader )
{
tBuildHeader.m_iTotalDocuments = m_tStats.m_iTotalDocuments;
tBuildHeader.m_iTotalBytes = m_tStats.m_iTotalBytes;
tBuildHeader.m_iDocinfo = m_iDocinfo;
tBuildHeader.m_iDocinfoIndex = m_iDocinfoIndex;
tBuildHeader.m_iMinMaxIndex = m_iMinMaxIndex;
if ( bCopyDictHeader )
*(DictHeader_t*)&tBuildHeader = *(DictHeader_t*)&m_tWordlist;
tWriteHeader.m_pSettings = &m_tSettings;
tWriteHeader.m_pSchema = &m_tSchema;
tWriteHeader.m_pTokenizer = m_pTokenizer;
tWriteHeader.m_pDict = m_pDict;
tWriteHeader.m_pFieldFilter = m_pFieldFilter.get();
tWriteHeader.m_pFieldLens = m_dFieldLens.Begin();
tWriteHeader.m_pSI = &m_tSI;
}
bool CSphIndex_VLN::SaveHeader ( CSphString & sError )
{
BuildHeader_t tBuildHeader;
WriteHeader_t tWriteHeader;
PrepareHeaders ( tBuildHeader, tWriteHeader );
// save the header
if ( !IndexBuildDone ( tBuildHeader, tWriteHeader, GetTmpFilename(SPH_EXT_SPH), sError ) )
return false;
return JuggleFile ( SPH_EXT_SPH, sError );
}
void CSphIndex_VLN::FlushDeadRowMap ( bool bWaitComplete ) const
{
// FIXME! handle errors
CSphString sError;
m_tDeadRowMap.Flush ( bWaitComplete, sError );
}
bool CSphIndex_VLN::LoadKillList ( CSphFixedVector<DocID_t> *pKillList, KillListTargets_c & tTargets, CSphString & sError ) const
{
CSphString sSPK = GetFilename ( SPH_EXT_SPK );
if ( !sphIsReadable ( sSPK ) )
return true;
CSphAutoreader tReader;
if ( !tReader.Open ( sSPK, sError ) )
return false;
DWORD nIndexes = tReader.GetDword();
tTargets.m_dTargets.Resize ( nIndexes );
for ( auto & tIndex : tTargets.m_dTargets )
{
tIndex.m_sIndex = tReader.GetString();
tIndex.m_uFlags = tReader.GetDword();
}
if ( pKillList )
{
DWORD nKills = tReader.GetDword();
pKillList->Reset(nKills);
DocID_t tDocID = 0;
for ( int i = 0; i < (int)nKills; i++ )
{
DocID_t tDelta = tReader.UnzipOffset();
assert ( tDelta>0 );
tDocID += tDelta;
(*pKillList)[i] = tDocID;
}
}
if ( tReader.GetErrorFlag() )
{
sError = tReader.GetErrorMessage();
return false;
}
return true;
}
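// write the .spk file: the kill-list target list followed by delta/zip-encoded docids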
bool WriteKillList ( const CSphString & sFilename, const DocID_t * pKlist, int nEntries, const KillListTargets_c & tTargets, CSphString & sError )
{
if ( !nEntries && !tTargets.m_dTargets.GetLength() )
return true;
CSphWriter tKillList;
if ( !tKillList.OpenFile ( sFilename, sError ) )
return false;
tKillList.PutDword ( tTargets.m_dTargets.GetLength() );
for ( const auto & tTarget : tTargets.m_dTargets )
{
tKillList.PutString ( tTarget.m_sIndex );
tKillList.PutDword ( tTarget.m_uFlags );
}
tKillList.PutDword ( nEntries );
if ( pKlist )
{
DocID_t tPrevDocID = 0;
for ( int i = 0; i < nEntries; i++ )
{
DocID_t tDocID = pKlist[i];
tKillList.ZipOffset ( tDocID-tPrevDocID );
tPrevDocID = tDocID;
}
}
tKillList.CloseFile();
if ( tKillList.IsError() )
{
sError.SetSprintf ( "error writing kill list to %s", sFilename.cstr() );
return false;
}
return true;
}
bool CSphIndex_VLN::AlterKillListTarget ( KillListTargets_c & tTargets, CSphString & sError )
{
CSphFixedVector<DocID_t> dKillList(0);
KillListTargets_c tOldTargets;
if ( !LoadKillList ( &dKillList, tOldTargets, sError ) )
return false;
if ( !WriteKillList ( GetTmpFilename ( SPH_EXT_SPK ), dKillList.Begin(), dKillList.GetLength(), tTargets, sError ) )
return false;
if ( !JuggleFile ( SPH_EXT_SPK, sError, false ) )
return false;
return true;
}
void CSphIndex_VLN::KillExistingDocids ( CSphIndex * pTarget ) const
{
// FIXME! collecting all docids is a waste of memory
LookupReaderIterator_c tLookup ( m_tDocidLookup.GetReadPtr() );
CSphFixedVector<DocID_t> dKillList ( m_iDocinfo );
for ( auto& dKill : dKillList )
tLookup.ReadDocID ( dKill );
pTarget->KillMulti ( dKillList );
}
int CSphIndex_VLN::KillMulti ( const VecTraits_T<DocID_t> & dKlist )
{
LookupReaderIterator_c tTargetReader ( m_tDocidLookup.GetReadPtr() );
DocidListReader_c tKillerReader ( dKlist );
int iTotalKilled;
if ( !HasKillHook() )
iTotalKilled = KillByLookup ( tTargetReader, tKillerReader, m_tDeadRowMap );
else
iTotalKilled = ProcessIntersected ( tTargetReader, tKillerReader, [this] ( RowID_t tRow, DocID_t tDoc )
{
if ( !m_tDeadRowMap.Set ( tRow ) )
return false;
KillHook ( tDoc );
return true;
} );
if ( iTotalKilled )
m_uAttrsStatus |= IndexSegment_c::ATTRS_ROWMAP_UPDATED;
return iTotalKilled;
}
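// walk the docid lookup in ascending docid order and mark every duplicate occurrence
// (all rows but the first one for each docid) as dead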
int CSphIndex_VLN::KillDupes()
{
LookupReaderIterator_c tLookup ( m_tDocidLookup.GetReadPtr() );
int iTotalKilled = 0;
RowID_t tRowID = INVALID_ROWID;
DocID_t tLastDocID = 0, tDocID = 0;
while ( tLookup.Read ( tDocID, tRowID ) )
{
if ( tDocID == tLastDocID )
{
m_tDeadRowMap.Set ( tRowID );
++iTotalKilled;
continue;
}
tLastDocID = tDocID;
}
if ( iTotalKilled )
m_uAttrsStatus |= IndexSegment_c::ATTRS_ROWMAP_UPDATED;
return iTotalKilled;
}
int CSphIndex_VLN::CheckThenKillMulti ( const VecTraits_T<DocID_t>& dKlist, BlockerFn&& fnWatcher )
{
LookupReaderIterator_c tTargetReader ( m_tDocidLookup.GetReadPtr() );
DocidListReader_c tKillerReader ( dKlist );
int iTotalKilled = ProcessIntersected ( tTargetReader, tKillerReader, [this,fnWatcher=std::move(fnWatcher)] ( RowID_t tRow, DocID_t tDoc )
{
if ( m_tDeadRowMap.IsSet ( tRow ) ) // already killed, nothing to do.
return false;
if ( !fnWatcher() )
return false;
Verify ( m_tDeadRowMap.Set ( tRow ) );
KillHook ( tDoc );
return true;
} );
if ( iTotalKilled )
m_uAttrsStatus |= IndexSegment_c::ATTRS_ROWMAP_UPDATED;
return iTotalKilled;
}
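// heuristic: decide whether a query is cheap enough that pseudo sharding would only add overhead
// (a fullscan driven by cheap iterators, or a single rare keyword)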
bool CSphIndex_VLN::IsQueryFast ( const CSphQuery & tQuery, const CSphVector<SecondaryIndexInfo_t> & dEnabledIndexes, float fCost ) const
{
const float COST_THRESH = 0.5f;
if ( tQuery.m_sQuery.IsEmpty() )
{
int iNumIterators = dEnabledIndexes.count_of ( [] ( const auto & tIndex ){ return tIndex.m_eType!=SecondaryIndexType_e::FILTER && tIndex.m_eType!=SecondaryIndexType_e::NONE; } );
int iNumFilters = dEnabledIndexes.count_of ( [] ( const auto & tIndex ){ return tIndex.m_eType==SecondaryIndexType_e::FILTER; } );
return iNumIterators && !iNumFilters && fCost<=COST_THRESH;
}
if ( m_pFieldFilter )
return false;
if ( !tQuery.m_sQueryTokenFilterName.IsEmpty() )
return false;
if ( m_tSettings.m_uAotFilterMask!=0 )
return false;
if ( m_tSettings.GetMinPrefixLen ( m_pDict->GetSettings().m_bWordDict )>0 )
return false;
if ( m_tSettings.m_iMinInfixLen>0 )
return false;
GetKeywordsSettings_t tSettings;
tSettings.m_bStats = true;
tSettings.m_iCutoff = 1;
CSphVector <CSphKeywordInfo> dKeywords;
if ( !GetKeywords ( dKeywords, tQuery.m_sQuery.cstr(), tSettings, nullptr ) )
return true;
if ( dKeywords.GetLength()<1 )
return true;
if ( dKeywords.GetLength()>1 )
return false;
const int DOCS_THRESH = 1024;
return dKeywords[0].m_iDocs<=DOCS_THRESH;
}
static bool CheckQueryFilters ( const CSphQuery & tQuery, const CSphSchema & tIndexSchema )
{
for ( auto & tFilter : tQuery.m_dFilters )
{
CommonFilterSettings_t tFixedSettings;
CSphString sError;
CreateFilterContext_t tCtx;
tCtx.m_pMatchSchema = &tIndexSchema;
tCtx.m_pIndexSchema = &tIndexSchema;
if ( !FixupFilterSettings ( tFilter, tFixedSettings, tCtx, tFilter.m_sAttrName, sError ) )
return false;
}
return true;
}
CSphVector<SecondaryIndexInfo_t> CSphIndex_VLN::GetEnabledIndexes ( const CSphQuery & tQuery, bool bFT, float & fCost, int iThreads ) const
{
	// if there's a filter tree, secondary indexes are not applicable, so there's no point wasting time evaluating them
if ( tQuery.m_dFilterTree.GetLength() )
return {};
int iCutoff = ApplyImplicitCutoff ( tQuery, {}, bFT );
StrVec_t dWarnings;
SelectIteratorCtx_t tCtx ( tQuery, tQuery.m_dFilters, m_tSchema, m_tSchema, m_pHistograms, m_pColumnar.get(), m_tSI, iCutoff, m_iDocinfo, iThreads );
return SelectIterators ( tCtx, fCost, dWarnings );
}
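// returns the pseudo-sharding metric (total document count) and a thread cap;
// { 0, 1 } disables pseudo sharding and forces single-threaded execution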
std::pair<int64_t,int> CSphIndex_VLN::GetPseudoShardingMetric ( const VecTraits_T<const CSphQuery> & dQueries, const VecTraits_T<int64_t> & dMaxCountDistinct, int iThreads, bool & bForceSingleThread ) const
{
if ( MustRunInSingleThread ( dQueries, !m_tSI.IsEmpty(), dMaxCountDistinct, bForceSingleThread ) )
return { 0, 1 };
bool bAllFast = true;
int iThreadCap = 0;
int iNumProc = GetNumPhysicalCPUs();
if ( iNumProc==-1 )
iNumProc = GetNumLogicalCPUs()/2;
ARRAY_FOREACH ( i, dQueries )
{
auto & tQuery = dQueries[i];
		// limit the number of threads for anything with FT, as it performs better on average (though some queries are faster without the thread cap)
bool bFulltext = !tQuery.m_pQueryParser->IsFullscan(tQuery);
if ( bFulltext )
iThreadCap = iThreadCap ? Min ( iThreadCap, iNumProc ) : iNumProc;
if ( !tQuery.m_sKNNAttr.IsEmpty() )
iThreadCap = 1;
if ( !CheckQueryFilters ( tQuery, m_tSchema ) )
continue;
float fCost = FLT_MAX;
CSphVector<SecondaryIndexInfo_t> dEnabledIndexes = GetEnabledIndexes ( tQuery, bFulltext, fCost, iThreads );
bAllFast &= IsQueryFast ( tQuery, dEnabledIndexes, fCost );
// disable pseudo sharding if any of the queries use docid lookups
if ( dEnabledIndexes.any_of ( []( const SecondaryIndexInfo_t & tSI ){ return tSI.m_eType==SecondaryIndexType_e::LOOKUP; } ) )
return { 0, 1 };
// enable pseudo sharding but limit number of threads when we use SI in fullscan
if ( dEnabledIndexes.any_of ( []( const SecondaryIndexInfo_t & tSI ){ return tSI.m_eType==SecondaryIndexType_e::INDEX; } ) )
iThreadCap = iThreadCap ? Min ( iThreadCap, iNumProc ) : iNumProc;
}
if ( bAllFast )
return { 0, 1 };
return { GetStats().m_iTotalDocuments, iThreadCap };
}
int64_t CSphIndex_VLN::GetCountDistinct ( const CSphString & sAttr, CSphString & sModifiedAttr ) const
{
if ( m_tDeadRowMap.HasDead() )
return -1;
sModifiedAttr = sAttr;
if ( !m_tSchema.GetAttr ( sAttr.cstr() ) && sphJsonNameSplit ( sAttr.cstr() ) )
sModifiedAttr = UnifyJsonFieldName(sAttr);
return m_tSI.GetCountDistinct(sModifiedAttr);
}
int64_t CSphIndex_VLN::GetCountFilter ( const CSphFilterSettings & tFilter, CSphString & sModifiedAttr ) const
{
if ( m_tDeadRowMap.HasDead() )
return -1;
CSphFilterSettings tModifiedFilter = tFilter;
CSphQuery tQuery;
SelectIteratorCtx_t tCtx ( tQuery, tQuery.m_dFilters, m_tSchema, m_tSchema, m_pHistograms, m_pColumnar.get(), m_tSI, 0, m_iDocinfo, 1 );
sModifiedAttr = tFilter.m_sAttrName;
if ( !m_tSchema.GetAttr ( sModifiedAttr.cstr() ) && sphJsonNameSplit ( sModifiedAttr.cstr() ) )
{
tModifiedFilter.m_sAttrName = UnifyJsonFieldName(sModifiedAttr);
sModifiedAttr = tModifiedFilter.m_sAttrName;
}
if ( !tCtx.IsEnabled_SI(tModifiedFilter) )
return -1;
common::Filter_t tColumnarFilter;
CSphString sWarning;
if ( !ToColumnarFilter ( tColumnarFilter, tModifiedFilter, SPH_COLLATION_DEFAULT, m_tSchema, sWarning ) )
return -1;
uint32_t uCount = 0;
CSphString sError;
if ( !m_tSI.CalcCount ( uCount, tColumnarFilter, m_iDocinfo, sError ) )
return -1;
return uCount;
}
int64_t CSphIndex_VLN::GetCount() const
{
return m_iDocinfo - m_tDeadRowMap.GetNumDeads();
}
/////////////////////////////////////////////////////////////////////////////
struct CmpHit_fn
{
inline static bool IsLess ( const CSphWordHit & a, const CSphWordHit & b )
{
return ( a.m_uWordID<b.m_uWordID ) ||
( a.m_uWordID==b.m_uWordID && a.m_tRowID<b.m_tRowID ) ||
( a.m_uWordID==b.m_uWordID && a.m_tRowID==b.m_tRowID && HITMAN::GetPosWithField ( a.m_uWordPos )<HITMAN::GetPosWithField ( b.m_uWordPos ) );
}
};
void CSphIndex_VLN::GetIndexFiles ( StrVec_t& dFiles, StrVec_t& dExt, const FilenameBuilder_i* pParentFilenameBuilder ) const
{
if ( !m_pDict )
return;
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder { nullptr };
if ( !pParentFilenameBuilder && GetIndexFilenameBuilder() )
{
pFilenameBuilder = GetIndexFilenameBuilder() ( GetName() );
pParentFilenameBuilder = pFilenameBuilder.get();
}
GetSettingsFiles ( m_pTokenizer, m_pDict, GetSettings(), pParentFilenameBuilder, dExt );
auto fnAddFile = [this,&dFiles] ( ESphExt eFile ) {
auto sFile = GetFilename ( eFile );
if ( sphIsReadable ( sFile.cstr() ) )
dFiles.Add ( std::move ( sFile ) );
};
for ( auto eExt : { SPH_EXT_SPH, SPH_EXT_SPD, SPH_EXT_SPP, SPH_EXT_SPE, SPH_EXT_SPI, SPH_EXT_SPM, SPH_EXT_SPK } )
fnAddFile ( eExt );
if ( m_uVersion >= 55 )
fnAddFile ( SPH_EXT_SPHI );
if ( m_uVersion >= 57 && ( m_tSchema.HasStoredFields() || m_tSchema.HasStoredAttrs() ) )
fnAddFile ( SPH_EXT_SPDS );
if ( m_uVersion >= 65 && m_tSchema.HasKNNAttrs() )
fnAddFile ( SPH_EXT_SPKNN );
if ( m_bIsEmpty )
return;
fnAddFile ( SPH_EXT_SPT );
if ( m_uVersion >= 64 )
fnAddFile ( SPH_EXT_SPIDX );
if ( m_uVersion >= 66 )
fnAddFile ( SPH_EXT_SPJIDX );
if ( m_tSchema.HasNonColumnarAttrs() )
fnAddFile ( SPH_EXT_SPA );
if ( m_tSchema.GetAttr ( sphGetBlobLocatorName() ) )
fnAddFile ( SPH_EXT_SPB );
if ( m_uVersion >= 63 && m_tSchema.HasColumnarAttrs() )
fnAddFile ( SPH_EXT_SPC );
}
void GetSettingsFiles ( const TokenizerRefPtr_c& pTok, const DictRefPtr_c& pDict, const CSphIndexSettings & tSettings, const FilenameBuilder_i* pFilenameBuilder, StrVec_t & dFiles )
{
assert ( pTok );
assert ( pDict );
StringBuilder_c sFiles ( "," );
sFiles << pDict->GetSettings().m_sStopwords << pTok->GetSettings().m_sSynonymsFile << tSettings.m_sHitlessFiles << tSettings.m_sJiebaUserDictPath;
auto dFileNames = sphSplit ( sFiles.cstr(), " \t," );
if ( pFilenameBuilder )
{
pDict->GetSettings().m_dWordforms.for_each ( [&] ( const auto& sFileName ) { dFiles.Add ( pFilenameBuilder->GetFullPath ( sFileName ) ); } );
dFileNames.for_each ( [&] ( const auto& sFileName ) { dFiles.Add ( pFilenameBuilder->GetFullPath ( sFileName ) ); } );
} else
{
pDict->GetSettings().m_dWordforms.for_each ( [&] ( const auto& sFileName ) { dFiles.Add ( sFileName ); } );
dFileNames.for_each ( [&] ( const auto& sFileName ) { dFiles.Add ( sFileName ); } );
}
}
class CSphHitBuilder
{
public:
CSphHitBuilder ( const CSphIndexSettings & tSettings, const CSphVector<SphWordID_t> & dHitless, bool bMerging, int iBufSize, DictRefPtr_c pDict, CSphString * sError, StrVec_t * pCreatedFiles );
bool CreateIndexFiles ( const CSphString& sDocName, const CSphString& sHitName, const CSphString& sSkipName, bool bInplace, int iWriteBuffer, CSphAutofile & tHit, SphOffset_t * pSharedOffset=nullptr );
void HitReset ();
void cidxHit ( AggregateHit_t * pHit );
bool cidxDone ( int iMemLimit, int & iMinInfixLen, int iMaxCodepointLen, DictHeader_t * pDictHeader );
int cidxWriteRawVLB ( int fd, CSphWordHit * pHit, int iHits );
SphOffset_t GetHitfilePos () const { return m_wrHitlist.GetPos (); }
void CloseHitlist () { m_wrHitlist.CloseFile (); }
bool IsError () const { return ( m_pDict->DictIsError() || m_wrDoclist.IsError() || m_wrHitlist.IsError() ); }
void HitblockBegin () { m_pDict->HitblockBegin(); }
bool IsWordDict () const { return m_pDict->GetSettings().m_bWordDict; }
private:
void DoclistBeginEntry ( RowID_t tDocid );
void DoclistEndEntry ( Hitpos_t uLastPos );
void DoclistEndList ();
	CSphWriter m_wrDoclist; ///< doclist writer
CSphWriter m_wrHitlist; ///< hitlist writer
CSphWriter m_wrSkiplist; ///< skiplist writer
CSphFixedVector<BYTE> m_dWriteBuffer; ///< my write buffer (for temp files)
AggregateHit_t m_tLastHit; ///< hitlist entry
Hitpos_t m_iPrevHitPos {0}; ///< previous hit position
bool m_bGotFieldEnd = false;
std::array<BYTE, MAX_KEYWORD_BYTES> m_sLastKeyword;
const CSphVector<SphWordID_t> & m_dHitlessWords;
DictRefPtr_c m_pDict;
CSphString * m_pLastError;
int m_iSkiplistBlockSize = 0;
SphOffset_t m_iLastHitlistPos = 0; ///< doclist entry
SphOffset_t m_iLastHitlistDelta = 0; ///< doclist entry
FieldMask_t m_dLastDocFields; ///< doclist entry
DWORD m_uLastDocHits = 0; ///< doclist entry
DictEntry_t m_tWord; ///< dictionary entry
ESphHitFormat m_eHitFormat;
ESphHitless m_eHitless;
CSphVector<SkiplistEntry_t> m_dSkiplist;
StrVec_t * m_pCreatedFiles { nullptr };
#ifndef NDEBUG
bool m_bMerging;
#endif
};
CSphHitBuilder::CSphHitBuilder ( const CSphIndexSettings & tSettings, const CSphVector<SphWordID_t> & dHitless, bool bMerging, int iBufSize, DictRefPtr_c pDict, CSphString * sError, StrVec_t * pCreatedFiles )
: m_dWriteBuffer ( iBufSize )
, m_dHitlessWords ( dHitless )
, m_pDict ( std::move ( pDict ) )
, m_pLastError ( sError )
, m_iSkiplistBlockSize ( tSettings.m_iSkiplistBlockSize )
, m_eHitFormat ( tSettings.m_eHitFormat )
, m_eHitless ( tSettings.m_eHitless )
, m_pCreatedFiles ( pCreatedFiles )
#ifndef NDEBUG
, m_bMerging ( bMerging )
#endif
{
m_sLastKeyword[0] = '\0';
HitReset();
m_dLastDocFields.UnsetAll();
assert ( m_pDict );
assert ( m_pLastError );
m_pDict->SetSkiplistBlockSize ( m_iSkiplistBlockSize );
}
bool CSphHitBuilder::CreateIndexFiles ( const CSphString& sDocName, const CSphString& sHitName, const CSphString& sSkipName, bool bInplace, int iWriteBuffer, CSphAutofile & tHit, SphOffset_t * pSharedOffset )
{
// doclist and hitlist files
m_wrDoclist.CloseFile();
m_wrHitlist.CloseFile();
m_wrSkiplist.CloseFile();
m_wrDoclist.SetBufferSize ( m_dWriteBuffer.GetLength() );
m_wrHitlist.SetBufferSize ( bInplace ? iWriteBuffer : m_dWriteBuffer.GetLength() );
if ( !m_wrDoclist.OpenFile ( sDocName, *m_pLastError ) )
return false;
if ( m_pCreatedFiles )
m_pCreatedFiles->Add ( m_wrDoclist.GetFilename() );
if ( bInplace )
{
sphSeek ( tHit.GetFD(), 0, SEEK_SET );
m_wrHitlist.SetFile ( tHit, pSharedOffset, *m_pLastError );
} else
{
if ( !m_wrHitlist.OpenFile ( sHitName, *m_pLastError ) )
return false;
if ( m_pCreatedFiles )
m_pCreatedFiles->Add ( m_wrHitlist.GetFilename() );
}
if ( !m_wrSkiplist.OpenFile ( sSkipName, *m_pLastError ) )
return false;
if ( m_pCreatedFiles )
m_pCreatedFiles->Add ( m_wrSkiplist.GetFilename() );
	// put a dummy byte (otherwise the very first offset would be 0, its delta would be 0,
	// and VLB encoding of offsets would break, since zero values double as end markers)
BYTE bDummy = 1;
m_wrDoclist.PutBytes ( &bDummy, 1 );
m_wrHitlist.PutBytes ( &bDummy, 1 );
m_wrSkiplist.PutBytes ( &bDummy, 1 );
return true;
}
void CSphHitBuilder::HitReset()
{
m_tLastHit.m_tRowID = INVALID_ROWID;
m_tLastHit.m_uWordID = 0;
m_tLastHit.m_iWordPos = EMPTY_HIT;
m_tLastHit.m_szKeyword = m_sLastKeyword.data();
m_iPrevHitPos = 0;
m_bGotFieldEnd = false;
}
// doclist entry format
// (with the new and shiny "inline hit" format, that is)
//
// zint docid_delta
// zint doc_hits
// if doc_hits==1:
// zint field_pos
// zint field_no
// else:
// zint field_mask
// zint hlist_offset_delta
//
// so 4 bytes/doc minimum
// avg 4-6 bytes/doc according to our tests
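// illustration only: with the inline format, a document carrying exactly one hit
// stores that hit right in the doclist as zint(hitpos & 0x7FFFFF) followed by
// zint(hitpos >> 23) (see DoclistEndEntry below), so reading such an entry back
// requires no hitlist seek at all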
void CSphHitBuilder::DoclistBeginEntry ( RowID_t tRowid )
{
assert ( m_iSkiplistBlockSize>0 );
	// build skiplist
	// that is, save decoder state and doclist position once per m_iSkiplistBlockSize documents
if ( ( m_tWord.m_iDocs & ( m_iSkiplistBlockSize-1 ) )==0 )
{
SkiplistEntry_t & tBlock = m_dSkiplist.Add();
tBlock.m_tBaseRowIDPlus1 = m_tLastHit.m_tRowID+1;
tBlock.m_iOffset = m_wrDoclist.GetPos();
tBlock.m_iBaseHitlistPos = m_iLastHitlistPos;
}
// begin doclist entry
m_wrDoclist.ZipInt ( tRowid - m_tLastHit.m_tRowID );
}
void CSphHitBuilder::DoclistEndEntry ( Hitpos_t uLastPos )
{
// end doclist entry
if ( m_eHitFormat==SPH_HIT_FORMAT_INLINE )
{
bool bIgnoreHits =
( m_eHitless==SPH_HITLESS_ALL ) ||
( m_eHitless==SPH_HITLESS_SOME && ( m_tWord.m_iDocs & HITLESS_DOC_FLAG ) );
// inline the only hit into doclist (unless it is completely discarded)
// and finish doclist entry
m_wrDoclist.ZipInt ( m_uLastDocHits );
if ( m_uLastDocHits==1 && !bIgnoreHits )
{
m_wrHitlist.SeekTo ( m_iLastHitlistPos );
m_wrDoclist.ZipInt ( uLastPos & 0x7FFFFF );
m_wrDoclist.ZipInt ( uLastPos >> 23 );
m_iLastHitlistPos -= m_iLastHitlistDelta;
assert ( m_iLastHitlistPos>=0 );
} else
{
m_wrDoclist.ZipInt ( m_dLastDocFields.GetMask32() );
m_wrDoclist.ZipOffset ( m_iLastHitlistDelta );
}
} else // plain format - finish doclist entry
{
assert ( m_eHitFormat==SPH_HIT_FORMAT_PLAIN );
m_wrDoclist.ZipOffset ( m_iLastHitlistDelta );
m_wrDoclist.ZipInt ( m_dLastDocFields.GetMask32() );
m_wrDoclist.ZipInt ( m_uLastDocHits );
}
m_dLastDocFields.UnsetAll();
m_uLastDocHits = 0;
// update keyword stats
m_tWord.m_iDocs++;
}
void CSphHitBuilder::DoclistEndList ()
{
assert ( m_iSkiplistBlockSize>0 );
// emit eof marker
m_wrDoclist.ZipInt ( 0 );
// emit skiplist
// OPTIMIZE? placing it after doclist means an extra seek on searching
// however placing it before means some (longer) doclist data moves while indexing
if ( m_tWord.m_iDocs>m_iSkiplistBlockSize )
{
assert ( m_dSkiplist.GetLength() );
assert ( m_dSkiplist[0].m_iOffset==m_tWord.m_iDoclistOffset );
assert ( m_dSkiplist[0].m_tBaseRowIDPlus1==0 );
assert ( m_dSkiplist[0].m_iBaseHitlistPos==0 );
m_tWord.m_iSkiplistOffset = m_wrSkiplist.GetPos();
// delta coding, but with a couple of skiplist specific tricks
// 1) first entry is omitted, it gets reconstructed from dict itself
// both base values are zero, and offset equals doclist offset
// 2) docids are at least SKIPLIST_BLOCK apart
// doclist entries are at least 4*SKIPLIST_BLOCK bytes apart
// so we additionally subtract that to improve delta coding
// 3) zero deltas are allowed and *not* used as any markers,
// as we know the exact skiplist entry count anyway
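		// illustration only: with a block size of 128, two consecutive entries with
		// base rowids 1000 and 1130 and doclist offsets 4000 and 4600 get encoded as
		// ZipInt ( 1130-1000-128 )==ZipInt(2) and ZipOffset ( 4600-4000-4*128 )==ZipOffset(88),
		// which keeps the deltas (and thus the encoded bytes) small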
SkiplistEntry_t tLast = m_dSkiplist[0];
for ( int i=1; i<m_dSkiplist.GetLength(); i++ )
{
const SkiplistEntry_t & t = m_dSkiplist[i];
assert ( t.m_tBaseRowIDPlus1 - tLast.m_tBaseRowIDPlus1>=(DWORD)m_iSkiplistBlockSize );
assert ( t.m_iOffset - tLast.m_iOffset>=4*m_iSkiplistBlockSize );
m_wrSkiplist.ZipInt ( t.m_tBaseRowIDPlus1 - tLast.m_tBaseRowIDPlus1 - m_iSkiplistBlockSize );
m_wrSkiplist.ZipOffset ( t.m_iOffset - tLast.m_iOffset - 4*m_iSkiplistBlockSize );
m_wrSkiplist.ZipOffset ( t.m_iBaseHitlistPos - tLast.m_iBaseHitlistPos );
tLast = t;
}
}
// in any event, reset skiplist
m_dSkiplist.Resize ( 0 );
}
static int strcmpp (const char* l, const char* r)
{
const char* szEmpty = "";
if ( !l )
l = szEmpty;
if ( !r )
r = szEmpty;
return strcmp ( l, r );
}
void CSphHitBuilder::cidxHit ( AggregateHit_t * pHit )
{
assert (
( pHit->m_uWordID!=0 && pHit->m_iWordPos!=EMPTY_HIT && pHit->m_tRowID!=INVALID_ROWID ) || // it's either ok hit
( pHit->m_uWordID==0 && pHit->m_iWordPos==EMPTY_HIT ) ); // or "flush-hit"
/////////////
// next word
/////////////
const bool bNextWord = ( m_tLastHit.m_uWordID!=pHit->m_uWordID || ( m_pDict->GetSettings().m_bWordDict && strcmpp ( (const char*)m_tLastHit.m_szKeyword, (const char*)pHit->m_szKeyword ) ) ); // OPTIMIZE?
const bool bNextDoc = bNextWord || ( m_tLastHit.m_tRowID!=pHit->m_tRowID );
if ( m_bGotFieldEnd && ( bNextWord || bNextDoc ) )
{
		// write the pending field-end hit only once, skipping duplicates
assert ( HITMAN::GetPosWithField ( m_iPrevHitPos )!=HITMAN::GetPosWithField ( m_tLastHit.m_iWordPos ) );
HITMAN::SetEndMarker ( &m_tLastHit.m_iWordPos );
m_wrHitlist.ZipInt ( m_tLastHit.m_iWordPos - m_iPrevHitPos );
m_bGotFieldEnd = false;
}
if ( bNextDoc )
{
// finish hitlist, if any
Hitpos_t uLastPos = m_tLastHit.m_iWordPos;
if ( m_tLastHit.m_iWordPos!=EMPTY_HIT )
{
m_wrHitlist.ZipInt ( 0 );
m_tLastHit.m_iWordPos = EMPTY_HIT;
m_iPrevHitPos = EMPTY_HIT;
}
// finish doclist entry, if any
if ( m_tLastHit.m_tRowID!=INVALID_ROWID )
DoclistEndEntry ( uLastPos );
}
if ( bNextWord )
{
// finish doclist, if any
if ( m_tLastHit.m_tRowID!=INVALID_ROWID )
{
// emit end-of-doclist marker
DoclistEndList ();
// emit dict entry
m_tWord.m_uWordID = m_tLastHit.m_uWordID;
m_tWord.m_szKeyword = m_tLastHit.m_szKeyword;
m_tWord.m_iDoclistLength = m_wrDoclist.GetPos() - m_tWord.m_iDoclistOffset;
if ( m_tWord.m_iDocs )
m_pDict->DictEntry ( m_tWord );
// reset trackers
m_tWord.m_iDocs = 0;
m_tWord.m_iHits = 0;
m_tLastHit.m_tRowID = INVALID_ROWID;
m_iLastHitlistPos = 0;
}
// flush wordlist, if this is the end
if ( pHit->m_iWordPos==EMPTY_HIT )
{
m_pDict->DictEndEntries ( m_wrDoclist.GetPos() );
return;
}
#ifndef NDEBUG
assert ( pHit->m_uWordID > m_tLastHit.m_uWordID
|| ( m_pDict->GetSettings().m_bWordDict &&
pHit->m_uWordID==m_tLastHit.m_uWordID && strcmp ( (const char*)pHit->m_szKeyword, (const char*)m_tLastHit.m_szKeyword )>0 )
|| m_bMerging );
#endif // asserts are usually excluded from release builds, but this is a 'paranoid' sanity clause
m_tWord.m_iDoclistOffset = m_wrDoclist.GetPos();
m_tLastHit.m_uWordID = pHit->m_uWordID;
if ( m_pDict->GetSettings().m_bWordDict )
{
assert ( strlen ( (const char *)pHit->m_szKeyword )<sizeof(m_sLastKeyword)-1 );
strncpy ( (char*)const_cast<BYTE*>(m_tLastHit.m_szKeyword), (const char*)pHit->m_szKeyword, sizeof(m_sLastKeyword) ); // OPTIMIZE?
}
}
if ( bNextDoc )
{
// begin new doclist entry for new doc id
assert ( m_tLastHit.m_tRowID==INVALID_ROWID || pHit->m_tRowID>m_tLastHit.m_tRowID );
assert ( m_wrHitlist.GetPos()>=m_iLastHitlistPos );
DoclistBeginEntry ( pHit->m_tRowID );
m_iLastHitlistDelta = m_wrHitlist.GetPos() - m_iLastHitlistPos;
m_tLastHit.m_tRowID = pHit->m_tRowID;
m_iLastHitlistPos = m_wrHitlist.GetPos();
}
///////////
// the hit
///////////
if ( !pHit->m_dFieldMask.TestAll(false) ) // merge aggregate hits into the current hit
{
int iHitCount = pHit->GetAggrCount();
assert ( m_eHitless );
assert ( iHitCount );
assert ( !pHit->m_dFieldMask.TestAll(false) );
m_uLastDocHits += iHitCount;
for ( int i=0; i<FieldMask_t::SIZE; i++ )
m_dLastDocFields[i] |= pHit->m_dFieldMask[i];
m_tWord.m_iHits += iHitCount;
if ( m_eHitless==SPH_HITLESS_SOME )
m_tWord.m_iDocs |= HITLESS_DOC_FLAG;
} else // handle normal hits
{
Hitpos_t iHitPosPure = HITMAN::GetPosWithField ( pHit->m_iWordPos );
// skip any duplicates and keep only 1st position in place
// duplicates are hit with same position: [N, N] [N, N | FIELDEND_MASK] [N | FIELDEND_MASK, N] [N | FIELDEND_MASK, N | FIELDEND_MASK]
if ( iHitPosPure==m_tLastHit.m_iWordPos )
return;
// storing previous hit that might have a field end flag
if ( m_bGotFieldEnd )
{
if ( HITMAN::GetField ( pHit->m_iWordPos )!=HITMAN::GetField ( m_tLastHit.m_iWordPos ) ) // is field end flag real?
HITMAN::SetEndMarker ( &m_tLastHit.m_iWordPos );
m_wrHitlist.ZipInt ( m_tLastHit.m_iWordPos - m_iPrevHitPos );
m_bGotFieldEnd = false;
}
/* duplicate hits from duplicated documents
... 0x03, 0x03 ...
... 0x8003, 0x8003 ...
... 1, 0x8003, 0x03 ...
... 1, 0x03, 0x8003 ...
... 1, 0x8003, 0x04 ...
... 1, 0x03, 0x8003, 0x8003 ...
... 1, 0x03, 0x8003, 0x03 ...
*/
assert ( m_tLastHit.m_iWordPos < pHit->m_iWordPos );
		// add the hit delta without the field-end marker,
		// or postpone adding it to the hitlist until we get another unique hit
if ( iHitPosPure==pHit->m_iWordPos )
{
m_wrHitlist.ZipInt ( pHit->m_iWordPos - m_tLastHit.m_iWordPos );
m_tLastHit.m_iWordPos = pHit->m_iWordPos;
} else
{
assert ( HITMAN::IsEnd ( pHit->m_iWordPos ) );
m_bGotFieldEnd = true;
m_iPrevHitPos = m_tLastHit.m_iWordPos;
m_tLastHit.m_iWordPos = HITMAN::GetPosWithField ( pHit->m_iWordPos );
}
// update matched fields mask
m_dLastDocFields.Set ( HITMAN::GetField ( pHit->m_iWordPos ) );
m_uLastDocHits++;
m_tWord.m_iHits++;
}
}
static void ReadSchemaColumn ( CSphReader & rdInfo, CSphColumnInfo & tCol, DWORD uVersion )
{
tCol.m_sName = rdInfo.GetString ();
if ( tCol.m_sName.IsEmpty () )
tCol.m_sName = "@emptyname";
tCol.m_sName.ToLower ();
tCol.m_eAttrType = (ESphAttr) rdInfo.GetDword (); // FIXME? check/fixup?
rdInfo.GetDword (); // ignore rowitem
tCol.m_tLocator.m_iBitOffset = rdInfo.GetDword ();
tCol.m_tLocator.m_iBitCount = rdInfo.GetDword ();
tCol.m_bPayload = ( rdInfo.GetByte()!=0 );
if ( uVersion>=61 )
tCol.m_uAttrFlags = rdInfo.GetDword();
if ( uVersion>=63 )
tCol.m_eEngine = (AttrEngine_e)rdInfo.GetDword();
// WARNING! max version used here must be in sync with RtIndex_c::Prealloc
}
static void ReadSchemaField ( CSphReader & rdInfo, CSphColumnInfo & tCol, DWORD uVersion )
{
if ( uVersion>=57 )
{
tCol.m_sName = rdInfo.GetString();
tCol.m_uFieldFlags = rdInfo.GetDword();
tCol.m_bPayload = !!rdInfo.GetByte();
}
else
ReadSchemaColumn ( rdInfo, tCol, uVersion );
if ( uVersion<59 )
tCol.m_uFieldFlags |= CSphColumnInfo::FIELD_INDEXED;
}
void ReadSchema ( CSphReader & rdInfo, CSphSchema & tSchema, DWORD uVersion )
{
tSchema.Reset ();
int iNumFields = rdInfo.GetDword();
for ( int i=0; i<iNumFields; i++ )
{
CSphColumnInfo tCol;
ReadSchemaField ( rdInfo, tCol, uVersion );
tSchema.AddField ( tCol );
}
int iNumAttrs = rdInfo.GetDword();
for ( int i=0; i<iNumAttrs; i++ )
{
CSphColumnInfo tCol;
ReadSchemaColumn ( rdInfo, tCol, uVersion );
tSchema.AddAttr ( tCol, false );
}
}
static void ReadLocatorJson ( bson::Bson_c tNode, CSphAttrLocator & tLoc )
{
using namespace bson;
tLoc.m_iBitOffset = (int) Int ( tNode.ChildByName ( "pos" ) );
tLoc.m_iBitCount = (int) Int ( tNode.ChildByName ( "bits" ) );
}
static void ReadSchemaColumnJson ( bson::Bson_c tNode, CSphColumnInfo & tCol )
{
using namespace bson;
tCol.m_sName = String ( tNode.ChildByName ( "name" ), "@emptyname" );
tCol.m_sName.ToLower();
tCol.m_uAttrFlags = (DWORD)Int ( tNode.ChildByName ( "flags" ), CSphColumnInfo::ATTR_NONE );
tCol.m_bPayload = Bool ( tNode.ChildByName ( "payload" ), false );
tCol.m_eEngine = (AttrEngine_e)Int ( tNode.ChildByName ( "engine" ), (DWORD)AttrEngine_e::DEFAULT );
tCol.m_eAttrType = (ESphAttr)Int ( tNode.ChildByName ( "type" ) );
ReadLocatorJson ( tNode.ChildByName ("locator"), tCol.m_tLocator );
NodeHandle_t tKNN = tNode.ChildByName ("knn");
if ( tKNN!=nullnode )
tCol.m_tKNN = ReadKNNJson(tKNN);
}
static void ReadSchemaFieldJson ( bson::Bson_c tNode, CSphColumnInfo & tCol )
{
using namespace bson;
tCol.m_sName = String ( tNode.ChildByName ( "name" ) );
tCol.m_uFieldFlags = (DWORD)Int ( tNode.ChildByName ( "flags" ), CSphColumnInfo::FIELD_INDEXED );
tCol.m_bPayload = Bool ( tNode.ChildByName ( "payload" ) );
}
void ReadSchemaJson ( bson::Bson_c tNode, CSphSchema & tSchema )
{
using namespace bson;
tSchema.Reset ();
Bson_c ( tNode.ChildByName ( "fields" ) ).ForEach ( [&tSchema] ( const NodeHandle_t& tNode )
{
CSphColumnInfo tCol;
ReadSchemaFieldJson ( tNode, tCol );
tSchema.AddField ( tCol );
} );
Bson_c ( tNode.ChildByName ( "attributes" ) ).ForEach ( [&tSchema] ( const NodeHandle_t& tNode )
{
CSphColumnInfo tCol;
ReadSchemaColumnJson ( tNode, tCol );
tSchema.AddAttr ( tCol, false );
} );
}
static void WriteSchemaField ( CSphWriter & fdInfo, const CSphColumnInfo & tCol )
{
fdInfo.PutString ( tCol.m_sName );
fdInfo.PutDword ( tCol.m_uFieldFlags );
fdInfo.PutByte ( tCol.m_bPayload );
}
static void WriteSchemaColumn ( CSphWriter & fdInfo, const CSphColumnInfo & tCol )
{
fdInfo.PutString( tCol.m_sName );
fdInfo.PutDword ( tCol.m_eAttrType );
fdInfo.PutDword ( tCol.m_tLocator.CalcRowitem() ); // for backwards compatibility
fdInfo.PutDword ( tCol.m_tLocator.m_iBitOffset );
fdInfo.PutDword ( tCol.m_tLocator.m_iBitCount );
fdInfo.PutByte ( tCol.m_bPayload );
fdInfo.PutDword ( tCol.m_uAttrFlags );
fdInfo.PutDword ( (DWORD)tCol.m_eEngine );
}
void WriteSchema ( CSphWriter & fdInfo, const CSphSchema & tSchema )
{
fdInfo.PutDword ( tSchema.GetFieldsCount() );
for ( int i=0; i<tSchema.GetFieldsCount(); i++ )
WriteSchemaField ( fdInfo, tSchema.GetField(i) );
fdInfo.PutDword ( tSchema.GetAttrsCount() );
for ( int i=0; i<tSchema.GetAttrsCount(); i++ )
WriteSchemaColumn ( fdInfo, tSchema.GetAttr(i) );
}
void operator<< ( JsonEscapedBuilder& tOut, const CSphAttrLocator& tLoc )
{
auto _ = tOut.Object();
tOut.NamedVal ( "pos", tLoc.m_iBitOffset );
tOut.NamedVal ( "bits", tLoc.m_iBitCount );
}
namespace {
void DumpFieldToJson ( JsonEscapedBuilder& tOut, const CSphColumnInfo& tCol )
{
auto _ = tOut.Object();
tOut.NamedString ( "name", tCol.m_sName );
tOut.NamedValNonDefault ( "flags", tCol.m_uFieldFlags, (DWORD)CSphColumnInfo::FIELD_INDEXED );
tOut.NamedValNonDefault ( "payload", tCol.m_bPayload, false );
}
void DumpAttrToJson ( JsonEscapedBuilder& tOut, const CSphColumnInfo& tCol )
{
auto _ = tOut.Object();
tOut.NamedString ( "name", tCol.m_sName );
tOut.NamedValNonDefault ( "flags", tCol.m_uAttrFlags, (DWORD)CSphColumnInfo::ATTR_NONE );
tOut.NamedValNonDefault ( "payload", tCol.m_bPayload, false );
tOut.NamedValNonDefault ( "engine", (DWORD)tCol.m_eEngine, (DWORD)AttrEngine_e::DEFAULT );
tOut.NamedVal ( "type", tCol.m_eAttrType );
tOut.NamedVal ( "locator", tCol.m_tLocator );
if ( tCol.IsIndexedKNN() )
tOut.NamedVal ( "knn", tCol.m_tKNN );
}
} // namespace
void operator<< ( JsonEscapedBuilder& tOut, const CSphSchema& tSchema )
{
auto _ = tOut.ObjectW();
if ( tSchema.GetFieldsCount() > 0 )
{
tOut.Named ( "fields" );
auto _ = tOut.ArrayW();
for ( int i = 0; i < tSchema.GetFieldsCount(); ++i )
DumpFieldToJson ( tOut, tSchema.GetField ( i ) );
}
if ( tSchema.GetAttrsCount() > 0 )
{
tOut.Named ( "attributes" );
auto _ = tOut.ArrayW();
for ( int i = 0; i < tSchema.GetAttrsCount(); ++i )
DumpAttrToJson ( tOut, tSchema.GetAttr ( i ) );
}
}
bool IndexBuildDone ( const BuildHeader_t & tBuildHeader, const WriteHeader_t & tWriteHeader, const CSphString & sFileName, CSphString & sError )
{
JsonEscapedBuilder sJson;
IndexWriteHeader ( tBuildHeader, tWriteHeader, sJson, false );
CSphWriter wrHeaderJson;
if ( wrHeaderJson.OpenFile ( sFileName, sError ) )
{
wrHeaderJson.PutString ( (Str_t)sJson );
wrHeaderJson.CloseFile();
assert ( bson::ValidateJson ( sJson.cstr(), &sError ) );
return true;
}
sphWarning ( "failed to serialize header to json: %s", sError.cstr() );
return false;
}
bool CSphHitBuilder::cidxDone ( int iMemLimit, int & iMinInfixLen, int iMaxCodepointLen, DictHeader_t * pDictHeader )
{
assert ( pDictHeader );
if ( m_bGotFieldEnd )
{
HITMAN::SetEndMarker ( &m_tLastHit.m_iWordPos );
m_wrHitlist.ZipInt ( m_tLastHit.m_iWordPos - m_iPrevHitPos );
m_bGotFieldEnd = false;
}
// finalize dictionary
// in dict=crc mode, just flushes wordlist checkpoints
// in dict=keyword mode, also creates infix index, if needed
if ( iMinInfixLen>0 && m_pDict->GetSettings().m_bWordDict )
{
pDictHeader->m_iInfixCodepointBytes = iMaxCodepointLen;
if ( iMinInfixLen==1 )
{
sphWarn ( "min_infix_len must be greater than 1, changed to 2" );
iMinInfixLen = 2;
}
}
if ( !m_pDict->DictEnd ( pDictHeader, iMemLimit, *m_pLastError ) )
return false;
// close all data files
m_wrDoclist.CloseFile ();
m_wrHitlist.CloseFile ( true );
m_wrSkiplist.CloseFile ();
return !IsError();
}
inline int encodeKeyword ( BYTE * pBuf, const char * pKeyword )
{
	auto iLen = (int) strlen ( pKeyword ); // OPTIMIZE! remove this and memcpy and check if that's faster
	assert ( iLen>0 && iLen<128 ); // so that the length fits into a single byte for ReadVLB()
*pBuf = (BYTE) iLen;
memcpy ( pBuf+1, pKeyword, iLen );
return 1+iLen;
}
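// illustration only: encodeKeyword ( pBuf, "cat" ) writes { 0x03, 'c', 'a', 't' }
// into pBuf and returns 4; keeping the length under 128 makes the prefix byte a
// valid single-byte VLB value for ReadVLB()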
int CSphHitBuilder::cidxWriteRawVLB ( int fd, CSphWordHit * pHit, int iHits )
{
assert ( pHit );
assert ( iHits>0 );
///////////////////////////////////////
// encode through a small write buffer
///////////////////////////////////////
BYTE *pBuf, *maxP;
int n = 0, w;
SphWordID_t d1, l1 = 0;
RowID_t d2, l2 = (RowID_t)-1; // rowids start from 0 and we can't have delta=0
DWORD d3, l3 = 0; // !COMMIT must be wide enough
int iGap = (int)Max ( 16*sizeof(DWORD) + ( m_pDict->GetSettings().m_bWordDict ? MAX_KEYWORD_BYTES : 0 ), 128u );
pBuf = m_dWriteBuffer.Begin();
maxP = m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() - iGap;
// hit aggregation state
DWORD uHitCount = 0;
DWORD uHitFieldMask = 0;
const int iPositionShift = m_eHitless==SPH_HITLESS_SOME ? 1 : 0;
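	// illustration only, a sketch of the resulting stream: every hit contributes up
	// to three zipped deltas (word, rowid, position); a zero in a word or rowid slot
	// acts as a restart marker for the lower levels, and the stream is terminated by
	// three explicit zeros (see the flush at the bottom of this function)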
while ( iHits-- )
{
// calc deltas
d1 = pHit->m_uWordID - l1;
d2 = pHit->m_tRowID - l2;
d3 = pHit->m_uWordPos - l3;
// ignore duplicate hits
if ( d1==0 && d2==0 && d3==0 ) // OPTIMIZE? check if ( 0==(d1|d2|d3) ) is faster
{
pHit++;
continue;
}
		// checks below are intended to handle several "fun" cases
//
// case 1, duplicate documents (same docid), different field contents, but ending with
// the same keyword, ending up in multiple field end markers within the same keyword
// eg. [foo] in positions {1, 0x800005} in 1st dupe, {3, 0x800007} in 2nd dupe
//
// case 2, blended token in the field end, duplicate parts, different positions (as expected)
// for those parts but still multiple field end markers, eg. [U.S.S.R.] in the end of field
// replacement of hit itself by field-end form
if ( d1==0 && d2==0 && HITMAN::GetPosWithField ( pHit->m_uWordPos )==HITMAN::GetPosWithField ( l3 ) )
{
l3 = pHit->m_uWordPos;
pHit++;
continue;
}
		// reset field-end inside token stream due to document duplicates
if ( d1==0 && d2==0 && HITMAN::IsEnd ( l3 ) && HITMAN::GetField ( pHit->m_uWordPos )==HITMAN::GetField ( l3 ) )
{
l3 = HITMAN::GetPosWithField ( l3 );
d3 = HITMAN::GetPosWithField ( pHit->m_uWordPos ) - l3;
if ( d3==0 )
{
pHit++;
continue;
}
}
// non-zero delta restarts all the fields after it
// because their deltas might now be negative
if ( d1 ) d2 = pHit->m_tRowID+1;
if ( d2 ) d3 = pHit->m_uWordPos;
// when we moved to the next word or document
bool bFlushed = false;
if ( d1 || d2 )
{
// flush previous aggregate hit
if ( uHitCount )
{
// we either skip all hits or the high bit must be available for marking
// failing that, we can't produce a consistent index
assert ( m_eHitless!=SPH_HITLESS_NONE );
assert ( m_eHitless==SPH_HITLESS_ALL || !( uHitCount & 0x80000000UL ) );
if ( m_eHitless!=SPH_HITLESS_ALL )
uHitCount = ( uHitCount << 1 ) | 1;
pBuf += ZipToPtrLE ( pBuf, uHitCount );
pBuf += ZipToPtrLE ( pBuf, uHitFieldMask );
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
uHitCount = 0;
uHitFieldMask = 0;
bFlushed = true;
}
// start aggregating if we're skipping all hits or this word is in a list of ignored words
if ( ( m_eHitless==SPH_HITLESS_ALL ) ||
( m_eHitless==SPH_HITLESS_SOME && m_dHitlessWords.BinarySearch ( pHit->m_uWordID ) ) )
{
uHitCount = 1;
uHitFieldMask |= 1 << HITMAN::GetField ( pHit->m_uWordPos );
}
} else if ( uHitCount ) // next hit for the same word/doc pair, update state if we need it
{
uHitCount++;
uHitFieldMask |= 1 << HITMAN::GetField ( pHit->m_uWordPos );
}
// encode enough restart markers
if ( d1 ) pBuf += ZipToPtrLE ( pBuf, 0 );
if ( d2 && !bFlushed ) pBuf += ZipToPtrLE ( pBuf, 0 );
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
// encode deltas
// encode keyword
if ( d1 )
{
if ( m_pDict->GetSettings().m_bWordDict )
pBuf += encodeKeyword ( pBuf, m_pDict->HitblockGetKeyword ( pHit->m_uWordID ) ); // keyword itself in case of keywords dict
else
pBuf += ZipToPtrLE ( pBuf, d1 ); // delta in case of CRC dict
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
}
// encode docid delta
if ( d2 )
{
pBuf += ZipToPtrLE ( pBuf, d2 );
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
}
assert ( d3 );
if ( !uHitCount ) // encode position delta, unless accumulating hits
{
pBuf += ZipToPtrLE ( pBuf, d3 << iPositionShift );
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
}
// update current state
l1 = pHit->m_uWordID;
l2 = pHit->m_tRowID;
l3 = pHit->m_uWordPos;
pHit++;
if ( pBuf>maxP )
{
w = (int)(pBuf - m_dWriteBuffer.Begin());
assert ( w<m_dWriteBuffer.GetLength() );
if ( !sphWriteThrottled ( fd, m_dWriteBuffer.Begin(), w, "raw_hits", *m_pLastError ) )
return -1;
n += w;
pBuf = m_dWriteBuffer.Begin();
}
}
// flush last aggregate
if ( uHitCount )
{
assert ( m_eHitless!=SPH_HITLESS_NONE );
assert ( m_eHitless==SPH_HITLESS_ALL || !( uHitCount & 0x80000000UL ) );
if ( m_eHitless!=SPH_HITLESS_ALL )
uHitCount = ( uHitCount << 1 ) | 1;
pBuf += ZipToPtrLE ( pBuf, uHitCount );
pBuf += ZipToPtrLE ( pBuf, uHitFieldMask );
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
}
pBuf += ZipToPtrLE ( pBuf, 0 );
pBuf += ZipToPtrLE ( pBuf, 0 );
pBuf += ZipToPtrLE ( pBuf, 0 );
assert ( pBuf<m_dWriteBuffer.Begin() + m_dWriteBuffer.GetLength() );
w = (int)(pBuf - m_dWriteBuffer.Begin());
assert ( w<m_dWriteBuffer.GetLength() );
if ( !sphWriteThrottled ( fd, m_dWriteBuffer.Begin(), w, "raw_hits", *m_pLastError ) )
return -1;
n += w;
return n;
}
/////////////////////////////////////////////////////////////////////////////
// OPTIMIZE?
inline bool SPH_CMPAGGRHIT_LESS ( const AggregateHit_t & a, const AggregateHit_t & b )
{
if ( a.m_uWordID < b.m_uWordID )
return true;
if ( a.m_uWordID > b.m_uWordID )
return false;
if ( a.m_szKeyword )
{
int iCmp = strcmp ( (const char*)a.m_szKeyword, (const char*)b.m_szKeyword ); // OPTIMIZE?
if ( iCmp!=0 )
return ( iCmp<0 );
}
return
( a.m_tRowID < b.m_tRowID ) ||
( a.m_tRowID==b.m_tRowID && HITMAN::GetPosWithField ( a.m_iWordPos )<HITMAN::GetPosWithField ( b.m_iWordPos ) );
}
/// hit priority queue entry
struct CSphHitQueueEntry : public AggregateHit_t // fixme! used for build
{
int m_iBin;
};
/// hit priority queue
struct CSphHitQueue // fixme! used for build
{
public:
CSphHitQueueEntry * m_pData;
int m_iSize;
int m_iUsed;
public:
/// create queue
explicit CSphHitQueue ( int iSize )
{
assert ( iSize>0 );
m_iSize = iSize;
m_iUsed = 0;
m_pData = new CSphHitQueueEntry [ iSize ];
}
/// destroy queue
~CSphHitQueue ()
{
SafeDeleteArray ( m_pData );
}
/// add entry to the queue
void Push ( AggregateHit_t & tHit, int iBin )
{
// check for overflow and do add
assert ( m_iUsed<m_iSize );
auto & tEntry = m_pData[m_iUsed];
tEntry.m_tRowID = tHit.m_tRowID;
tEntry.m_uWordID = tHit.m_uWordID;
tEntry.m_szKeyword = tHit.m_szKeyword; // bin must hold the actual data for the queue
tEntry.m_iWordPos = tHit.m_iWordPos;
tEntry.m_dFieldMask = tHit.m_dFieldMask;
tEntry.m_iBin = iBin;
int iEntry = m_iUsed++;
// sift up if needed
while ( iEntry )
{
int iParent = ( iEntry-1 ) >> 1;
if ( SPH_CMPAGGRHIT_LESS ( m_pData[iEntry], m_pData[iParent] ) )
{
// entry is less than parent, should float to the top
Swap ( m_pData[iEntry], m_pData[iParent] );
iEntry = iParent;
} else
break;
}
}
/// remove root (ie. top priority) entry
void Pop ()
{
assert ( m_iUsed );
if ( !(--m_iUsed) ) // empty queue? just return
return;
// make the last entry my new root
m_pData[0] = m_pData[m_iUsed];
// sift down if needed
int iEntry = 0;
while (true)
{
// select child
int iChild = (iEntry<<1) + 1;
if ( iChild>=m_iUsed )
break;
// select smallest child
if ( iChild+1<m_iUsed )
if ( SPH_CMPAGGRHIT_LESS ( m_pData[iChild+1], m_pData[iChild] ) )
iChild++;
			// if the smallest child is less than the entry, float the child up
if ( SPH_CMPAGGRHIT_LESS ( m_pData[iChild], m_pData[iEntry] ) )
{
Swap ( m_pData[iChild], m_pData[iEntry] );
iEntry = iChild;
continue;
}
break;
}
}
};
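// illustration only, a minimal k-way merge sketch over this queue; ReadNextHit()
// is a hypothetical helper that yields each bin's hits in SPH_CMPAGGRHIT_LESS order:
//
//	CSphHitQueue tQueue ( iBins );
//	for ( int i=0; i<iBins; i++ )			// warmup: one hit per bin
//		tQueue.Push ( *ReadNextHit(i), i );
//	while ( tQueue.m_iUsed )
//	{
//		cidxHit ( &tQueue.m_pData[0] );		// root is the globally smallest hit
//		int iBin = tQueue.m_pData[0].m_iBin;
//		tQueue.Pop();
//		if ( AggregateHit_t * pNext = ReadNextHit ( iBin ) )	// refill from the same bin
//			tQueue.Push ( *pNext, iBin );
//	}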
static const int MIN_KEYWORDS_DICT = 4*1048576; // FIXME! ideally must be in sync with impl (ENTRY_CHUNKS, KEYWORD_CHUNKS)
/////////////////////////////////////////////////////////////////////////////
bool CSphIndex_VLN::RelocateBlock ( int iFile, BYTE * pBuffer, int iRelocationSize,
SphOffset_t * pFileSize, CSphBin & dMinBin, SphOffset_t * pSharedOffset ) // build only
{
assert ( pBuffer && pFileSize && pSharedOffset );
SphOffset_t iBlockStart = dMinBin.m_iFilePos;
SphOffset_t iBlockLeft = dMinBin.m_iFileLeft;
ESphBinRead eRes = dMinBin.Precache ();
switch ( eRes )
{
case BIN_PRECACHE_OK:
return true;
case BIN_READ_ERROR:
m_sLastError = "block relocation: preread error";
return false;
default:
break;
}
int nTransfers = (int)( ( iBlockLeft+iRelocationSize-1) / iRelocationSize );
SphOffset_t uTotalRead = 0;
SphOffset_t uNewBlockStart = *pFileSize;
for ( int i = 0; i < nTransfers; i++ )
{
if ( !SeekAndWarn ( iFile, iBlockStart + uTotalRead, "block relocation" ))
return false;
		// the last chunk may be partial; when the block size is an exact multiple of
		// the relocation size, the remainder is 0 and the chunk is actually full-sized
		int iToRead = ( i==nTransfers-1 && ( iBlockLeft % iRelocationSize ) ) ? (int)( iBlockLeft % iRelocationSize ) : iRelocationSize;
size_t iRead = sphReadThrottled ( iFile, pBuffer, iToRead );
if ( iRead!=size_t(iToRead) )
{
m_sLastError.SetSprintf ( "block relocation: read error (%d of %d bytes read): %s", (int)iRead, iToRead, strerrorm(errno) );
return false;
}
if ( !SeekAndWarn ( iFile, *pFileSize, "block relocation" ))
return false;
uTotalRead += iToRead;
if ( !sphWriteThrottled ( iFile, pBuffer, iToRead, "block relocation", m_sLastError ) )
return false;
*pFileSize += iToRead;
}
assert ( uTotalRead==iBlockLeft );
// update block pointers
dMinBin.m_iFilePos = uNewBlockStart;
*pSharedOffset = *pFileSize;
return true;
}
bool LoadHitlessWords ( const CSphString & sHitlessFiles, const TokenizerRefPtr_c& pTok, const DictRefPtr_c& pDict, CSphVector<SphWordID_t> & dHitlessWords, CSphString & sError )
{
assert ( dHitlessWords.GetLength()==0 );
if ( sHitlessFiles.IsEmpty() )
return true;
StrVec_t dFiles;
sphSplit ( dFiles, sHitlessFiles.cstr(), ", " );
for ( const CSphString & sFilename : dFiles )
{
CSphAutofile tFile ( sFilename.cstr(), SPH_O_READ, sError );
if ( tFile.GetFD()==-1 )
return false;
CSphVector<BYTE> dBuffer ( (int)tFile.GetSize() );
if ( !tFile.Read ( &dBuffer[0], dBuffer.GetLength(), sError ) )
return false;
// FIXME!!! dict=keywords + hitless_words=some
pTok->SetBuffer ( &dBuffer[0], dBuffer.GetLength() );
while ( BYTE * sToken = pTok->GetToken() )
dHitlessWords.Add ( pDict->GetWordID ( sToken ) );
}
dHitlessWords.Uniq();
return true;
}
bool CSphIndex_VLN::Build_CollectQueryMvas ( const CSphVector<CSphSource*> & dSources, QueryMvaContainer_c & tMvaContainer )
{
CSphBitvec dQueryMvas ( m_tSchema.GetAttrsCount() );
for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
if ( tAttr.m_eSrc!=SPH_ATTRSRC_FIELD && ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET ) )
dQueryMvas.BitSet(i);
}
if ( !dQueryMvas.BitCount() )
return true;
assert ( !tMvaContainer.m_tContainer.GetLength() );
tMvaContainer.m_tContainer.Resize ( m_tSchema.GetAttrsCount() );
for ( auto & i : tMvaContainer.m_tContainer )
i = nullptr;
for ( auto & pSource : dSources )
{
assert ( pSource );
if ( !pSource->Connect ( m_sLastError ) )
return false;
for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
{
if ( !dQueryMvas.BitGet(i) )
continue;
auto * & pHash = tMvaContainer.m_tContainer[i];
if ( !pHash )
pHash = new OpenHashTable_T<int64_t, CSphVector<int64_t>>;
if ( !pSource->IterateMultivaluedStart ( i, m_sLastError ) )
return false;
int64_t iDocID;
int64_t iMvaValue;
while ( pSource->IterateMultivaluedNext ( iDocID, iMvaValue) )
{
auto & tMva = pHash->Acquire ( iDocID );
tMva.Add ( iMvaValue );
}
}
pSource->Disconnect ();
}
return true;
}
bool CSphIndex_VLN::Build_CollectJoinedFields ( const CSphVector<CSphSource*> & dSources, CSphAutofile & tFile, CSphVector<std::unique_ptr<OpenHashTable_T<uint64_t, uint64_t>>> & dJoinedOffsets )
{
for ( auto & pSource : dSources )
{
assert ( pSource );
if ( !pSource->Connect ( m_sLastError ) )
return false;
if ( !pSource->FetchJoinedFields ( tFile, dJoinedOffsets, m_sLastError ) )
return false;
pSource->Disconnect();
}
return true;
}
struct Mva32Uniq_fn
{
bool IsLess ( const uint64_t & iA, const uint64_t & iB ) const
{
DWORD uA = (DWORD)iA;
DWORD uB = (DWORD)iB;
return uA<uB;
}
bool IsEq ( const uint64_t & iA, const uint64_t & iB ) const
{
DWORD uA = (DWORD)iA;
DWORD uB = (DWORD)iB;
return uA==uB;
}
};
static void SortMva ( CSphVector<int64_t> & tMva, bool bMva32 )
{
if ( !bMva32 )
{
tMva.Uniq();
return;
}
tMva.Sort ( Mva32Uniq_fn() );
int iLeft = sphUniq ( tMva.Begin(), tMva.GetLength(), Mva32Uniq_fn() );
tMva.Resize ( iLeft );
}
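// illustration only: in bMva32 mode the values are compared on their low 32 bits,
// so { 5, 0x100000005, 7 } sorts and dedupes down to { 5, 7 } (the first two values
// share the low DWORD 5)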
static const CSphVector<int64_t> * FetchMVA ( DocID_t tDocId, int iAttr, const CSphColumnInfo & tAttr, QueryMvaContainer_c & tMvaContainer, AttrSource_i & tSource, bool bForceSource )
{
CSphVector<int64_t> * pMva = nullptr;
if ( tAttr.m_eSrc==SPH_ATTRSRC_FIELD || bForceSource )
pMva = tSource.GetFieldMVA(iAttr);
else
{
assert ( tMvaContainer.m_tContainer[iAttr] );
pMva = tMvaContainer.m_tContainer[iAttr]->Find(tDocId);
}
if ( pMva )
SortMva ( *pMva, ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET ) );
return pMva;
}
bool CSphIndex_VLN::Build_StoreBlobAttrs ( DocID_t tDocId, std::pair<SphOffset_t,SphOffset_t> & tOffsetSize, BlobRowBuilder_i & tBlobRowBuilder, QueryMvaContainer_c & tMvaContainer, AttrSource_i & tSource, bool bForceSource )
{
CSphString sError;
int iBlobAttr = 0;
for ( int i=0; i < m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
if ( !sphIsBlobAttr(tAttr) )
continue;
bool bOk = true;
bool bFatal = false;
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
{
const CSphVector<int64_t> * pMva = FetchMVA ( tDocId, i, tAttr, tMvaContainer, tSource, bForceSource );
bOk = tBlobRowBuilder.SetAttr ( iBlobAttr++, pMva ? (const BYTE*)(pMva->Begin()) : nullptr, pMva ? pMva->GetLength()*sizeof(int64_t) : 0, sError );
}
break;
case SPH_ATTR_STRING:
case SPH_ATTR_JSON:
{
const CSphString & sStrAttr = tSource.GetStrAttr(i);
bOk = tBlobRowBuilder.SetAttr ( iBlobAttr++, (const BYTE*)sStrAttr.cstr(), sStrAttr.Length(), sError );
if ( !bOk )
bFatal = tAttr.m_eAttrType==SPH_ATTR_JSON && g_bJsonStrict;
}
break;
default:
break;
}
if ( !bOk )
{
sError.SetSprintf ( "document " INT64_FMT ", attribute %s: %s", tDocId, tAttr.m_sName.cstr(), sError.cstr() );
if ( bFatal )
{
m_sLastError = sError;
return false;
}
else
sphWarning ( "%s", sError.cstr() );
}
}
tOffsetSize = tBlobRowBuilder.Flush();
return true;
}
template<typename T>
static void Builder_StoreAttrs ( const CSphSchema & tSchema, DocID_t tDocId, CSphSource & tSource, QueryMvaContainer_c & tMvaContainer, T * pBuilder, const CSphBitvec & tAttrsUsed )
{
int iAttrId = 0;
for ( int i=0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( !tAttrsUsed.BitGet(i) )
continue;
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_STRING:
{
const CSphString & sStrAttr = tSource.GetStrAttr(i);
pBuilder->SetAttr ( iAttrId, (const BYTE*)sStrAttr.cstr(), sStrAttr.Length() );
}
break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
{
const CSphVector<int64_t> * pMva = FetchMVA ( tDocId, i, tAttr, tMvaContainer, tSource, false );
pBuilder->SetAttr ( iAttrId, pMva ? pMva->Begin() : nullptr, pMva ? pMva->GetLength() : 0 );
}
break;
default:
pBuilder->SetAttr ( iAttrId, tSource.GetAttr(i) );
break;
}
iAttrId++;
}
}
static void BuildStoreHistograms ( const CSphSchema & tSchema, DocID_t tDocId, CSphSource & tSource, QueryMvaContainer_c & tMvaContainer, CSphVector<HistogramSource_t> & dHistograms )
{
for ( auto & tItem : dHistograms )
{
switch ( tItem.m_eAttrType )
{
case SPH_ATTR_STRING:
{
const CSphString & sVal = tSource.GetStrAttr ( tItem.m_iAttr );
int iLen = sVal.Length();
tItem.m_pHist->Insert ( iLen ? LibcCIHash_fn::Hash ( (const BYTE*)sVal.scstr(), iLen ) : 0 );
}
break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
{
const CSphVector<int64_t> * pMva = FetchMVA ( tDocId, tItem.m_iAttr, tSchema.GetAttr ( tItem.m_iAttr ), tMvaContainer, tSource, false );
if ( pMva )
{
for ( int64_t tVal : *pMva )
tItem.m_pHist->Insert ( tVal );
}
}
break;
default:
tItem.m_pHist->Insert ( tSource.GetAttr ( tItem.m_iAttr ) );
break;
}
}
}
template <typename T>
void SourceCopyMva ( const BYTE * pData, int iLenBytes, CSphVector<int64_t> & dDst )
{
const T * pSrc = (const T *)pData;
int iValues = iLenBytes / sizeof(T);
dDst.Resize ( iValues );
int64_t * pDst = dDst.Begin();
const int64_t * pDstEnd = pDst + iValues;
while ( pDst<pDstEnd )
{
*pDst = sphUnalignedRead ( *pSrc );
pSrc++;
pDst++;
}
}
static void ResetFileAccess ( CSphIndex * pIndex )
{
MutableIndexSettings_c tMutable = pIndex->GetMutableSettings();
tMutable.m_tFileAccess = FileAccessSettings_t();
pIndex->SetMutableSettings ( tMutable );
}
class KeepAttrs_c : public AttrSource_i
{
public:
explicit KeepAttrs_c ( QueryMvaContainer_c & tMvaContainer )
: m_pIndex ( nullptr )
, m_tMvaContainer ( tMvaContainer )
{}
void SetBlobSource ( AttrSource_i * pSource )
{
m_pBlobSource = pSource;
}
bool Init ( const CSphString & sKeepAttrs, const StrVec_t & dKeepAttrs, const CSphSchema & tSchema )
{
if ( sKeepAttrs.IsEmpty() && !dKeepAttrs.GetLength() )
return false;
m_pBlobRowLocator = tSchema.GetAttr ( sphGetBlobLocatorName() );
m_bHasBlobAttrs = tSchema.HasBlobAttrs();
m_iStride = tSchema.GetRowSize();
CSphString sError;
StrVec_t dWarnings;
m_pIndex = std::make_unique<CSphIndex_VLN> ( "keep-attrs", sKeepAttrs.cstr() );
ResetFileAccess ( m_pIndex.get() );
if ( !m_pIndex->Prealloc ( false, nullptr, dWarnings ) )
{
if ( !m_pIndex->GetLastError().IsEmpty() )
sError.SetSprintf ( "%s error: '%s'", sError.scstr(), m_pIndex->GetLastError().cstr() );
sphWarn ( "unable to load 'keep-attrs' table (%s); ignoring --keep-attrs", sError.cstr() );
m_pIndex.reset();
} else
{
// check schema
if ( !tSchema.CompareTo ( m_pIndex->GetMatchSchema(), sError, false ) )
{
sphWarn ( "schemas are different (%s); ignoring --keep-attrs", sError.cstr() );
m_pIndex.reset();
}
}
for ( const auto & i : dWarnings )
sphWarn ( "%s", i.cstr() );
if ( m_pIndex )
{
if ( dKeepAttrs.GetLength() )
{
m_dLocMva.Init ( tSchema.GetAttrsCount() );
m_dLocString.Init ( tSchema.GetAttrsCount() );
m_bKeepSomeAttrs = true;
ARRAY_FOREACH ( i, dKeepAttrs )
{
int iCol = tSchema.GetAttrIndex ( dKeepAttrs[i].cstr() );
if ( iCol==-1 )
{
sphWarn ( "no attribute found '%s'; ignoring --keep-attrs", dKeepAttrs[i].cstr() );
m_pIndex.reset();
break;
}
const CSphColumnInfo & tCol = tSchema.GetAttr ( iCol );
if ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET )
{
m_dLocMva.BitSet ( iCol );
m_bHasMva = true;
} else if ( tCol.m_eAttrType==SPH_ATTR_STRING || tCol.m_eAttrType==SPH_ATTR_JSON )
{
m_dLocString.BitSet ( iCol );
m_bHasString = true;
} else
{
m_dLocPlain.Add ( tCol.m_tLocator );
}
}
}
m_dMvaField.Init ( tSchema.GetAttrsCount() );
for ( int i=0; i<tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tCol = tSchema.GetAttr ( i );
if ( ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET ) && tCol.m_eSrc==SPH_ATTRSRC_FIELD )
{
m_dMvaField.BitSet ( i );
}
}
}
if ( m_pIndex )
m_pIndex->Preread();
return ( m_pIndex!=nullptr );
}
bool Keep ( DocID_t tDocid )
{
if ( !m_pIndex )
return false;
m_tDocid = tDocid;
m_pRow = m_pIndex->FindDocinfo ( tDocid );
return ( m_pRow!=nullptr );
}
SphAttr_t GetAttr ( int iAttr ) override
{
assert ( 0 && "internal error" );
return 0;
}
CSphVector<int64_t> * GetFieldMVA ( int iAttr ) override
{
if ( !m_pIndex || !m_pRow )
return nullptr;
// fallback to indexed data
if ( m_bKeepSomeAttrs && ( !m_bHasMva || !m_dLocMva.BitGet ( iAttr ) ) )
{
if ( m_dMvaField.BitGet ( iAttr ) )
{
return m_pBlobSource->GetFieldMVA ( iAttr );
} else
{
assert ( m_tMvaContainer.m_tContainer[iAttr] );
return m_tMvaContainer.m_tContainer[iAttr]->Find ( m_tDocid );
}
}
int iLen = 0;
ESphAttr eAttr = SPH_ATTR_NONE;
const BYTE * pData = GetBlobData ( iAttr, iLen, eAttr );
assert ( eAttr==SPH_ATTR_UINT32SET || eAttr==SPH_ATTR_INT64SET );
if ( eAttr==SPH_ATTR_INT64SET )
{
SourceCopyMva<int64_t> ( pData, iLen, m_dDataMva );
} else
{
SourceCopyMva<DWORD> ( pData, iLen, m_dDataMva );
}
return &m_dDataMva;
}
	/// returns the string attribute value for a given attribute index
	const CSphString & GetStrAttr ( int iAttr ) override
{
if ( !m_pIndex || !m_pRow )
return m_sEmpty;
// fallback to indexed data
if ( m_bKeepSomeAttrs && ( !m_bHasString || !m_dLocString.BitGet ( iAttr ) ) )
{
assert ( m_pBlobSource );
return m_pBlobSource->GetStrAttr ( iAttr );
}
int iLen = 0;
ESphAttr eAttr = SPH_ATTR_NONE;
const BYTE * pData = GetBlobData ( iAttr, iLen, eAttr );
assert ( eAttr==SPH_ATTR_STRING || eAttr==SPH_ATTR_JSON );
if ( !iLen )
return m_sEmpty;
m_sDataString.SetBinary ( (const char *)pData, iLen );
return m_sDataString;
}
const CSphRowitem * GetRow ( CSphRowitem * pSrc )
{
if ( !m_pIndex || !m_pRow )
return pSrc;
// keep only blob attributes
if ( m_bKeepSomeAttrs && !m_dLocPlain.GetLength() )
return pSrc;
if ( m_bKeepSomeAttrs )
{
// keep only some plain attributes
ARRAY_FOREACH ( i, m_dLocPlain )
{
const CSphAttrLocator & tLoc = m_dLocPlain[i];
SphAttr_t tAtrr = sphGetRowAttr ( m_pRow, tLoc );
sphSetRowAttr ( pSrc, tLoc, tAtrr );
}
return pSrc;
}
else if ( m_bHasBlobAttrs )
{
// copy whole row except blob row offset
assert(m_pBlobRowLocator);
SphOffset_t tOffset = sphGetRowAttr ( pSrc, m_pBlobRowLocator->m_tLocator );
memcpy ( pSrc, m_pRow, m_iStride*sizeof(CSphRowitem) );
sphSetRowAttr ( pSrc, m_pBlobRowLocator->m_tLocator, tOffset );
return pSrc;
}
// keep whole row
return m_pRow;
}
void Reset()
{
m_pIndex.reset();
}
private:
std::unique_ptr<CSphIndex_VLN> m_pIndex;
CSphVector<CSphAttrLocator> m_dLocPlain;
CSphBitvec m_dLocMva;
CSphBitvec m_dMvaField;
CSphBitvec m_dLocString;
const CSphColumnInfo * m_pBlobRowLocator = nullptr;
bool m_bKeepSomeAttrs = false;
bool m_bHasMva = false;
bool m_bHasString = false;
bool m_bHasBlobAttrs = false;
int m_iStride = 0;
const CSphRowitem * m_pRow = nullptr;
CSphVector<int64_t> m_dDataMva;
CSphString m_sDataString;
const CSphString m_sEmpty = "";
AttrSource_i * m_pBlobSource = nullptr;
DocID_t m_tDocid = 0;
QueryMvaContainer_c & m_tMvaContainer;
const BYTE * GetBlobData ( int iAttr, int & iLen, ESphAttr & eAttr )
{
const BYTE * pPool = m_pIndex->m_tBlobAttrs.GetReadPtr();
const CSphColumnInfo & tCol = m_pIndex->GetMatchSchema().GetAttr ( iAttr );
assert ( tCol.m_tLocator.IsBlobAttr() );
eAttr = tCol.m_eAttrType;
return sphGetBlobAttr ( m_pRow, tCol.m_tLocator, pPool, iLen );
}
};
void WarnAboutKillList ( const CSphVector<DocID_t> & dKillList, const KillListTargets_c & tTargets )
{
if ( dKillList.GetLength() && !tTargets.m_dTargets.GetLength() )
sphWarn ( "kill-list not empty but no killlist_target specified" );
if ( !dKillList.GetLength() )
{
for ( const auto & tTarget : tTargets.m_dTargets )
if ( tTarget.m_uFlags==KillListTarget_t::USE_KLIST )
{
sphWarn ( "killlist_target is specified, but kill-list is empty" );
break;
}
}
}
bool CSphIndex_VLN::SortDocidLookup ( int iFD, int nBlocks, int iMemoryLimit, int nLookupsInBlock, int nLookupsInLastBlock, CSphIndexProgress & tProgress ) // build only
{
tProgress.PhaseBegin ( CSphIndexProgress::PHASE_LOOKUP );
assert (!tProgress.m_iDocids);
tProgress.m_iDocidsTotal = m_tStats.m_iTotalDocuments;
if ( !nBlocks )
return true;
CSphWriter tfWriter;
if ( !tfWriter.OpenFile ( GetFilename ( SPH_EXT_SPT ), m_sLastError ) )
return false;
DocidLookupWriter_c tWriter ( tfWriter, (DWORD)m_tStats.m_iTotalDocuments );
tWriter.Start();
RawVector_T<CSphBin> dBins;
SphOffset_t iSharedOffset = -1;
	int iBinSize = CSphBin::CalcBinSize ( iMemoryLimit, nBlocks, "sort_lookup" );
	dBins.Reserve_static ( nBlocks );
for ( int i=0; i<nBlocks; ++i )
{
auto& dBin = dBins.Add();
dBin.m_iFileLeft = ( ( i==nBlocks-1 ) ? nLookupsInLastBlock : nLookupsInBlock )*sizeof(DocidRowidPair_t);
dBin.m_iFilePos = ( i==0 ) ? 0 : dBins[i-1].m_iFilePos + dBins[i-1].m_iFileLeft;
dBin.Init ( iFD, &iSharedOffset, iBinSize );
}
CSphFixedVector<DocidRowidPair_t> dTopDocIDs ( nBlocks );
CSphQueue<int, CmpQueuedLookup_fn> tLookupQueue ( nBlocks );
CmpQueuedLookup_fn::m_pStorage = dTopDocIDs.Begin();
for ( int i=0; i<nBlocks; ++i )
{
if ( dBins[i].ReadBytes ( &dTopDocIDs[i], sizeof(DocidRowidPair_t) )!=BIN_READ_OK )
{
m_sLastError.SetSprintf ( "sort_lookup: warmup failed (io error?)" );
return false;
}
tLookupQueue.Push(i);
}
DWORD tProcessed = 0;
while ( tLookupQueue.GetLength() )
{
int iBin = tLookupQueue.Root();
tWriter.AddPair ( dTopDocIDs[iBin] );
tLookupQueue.Pop();
ESphBinRead eRes = dBins[iBin].ReadBytes ( &dTopDocIDs[iBin], sizeof(DocidRowidPair_t) );
if ( eRes==BIN_READ_ERROR )
{
m_sLastError.SetSprintf ( "sort_lookup: failed to read entry" );
return false;
}
if ( eRes==BIN_READ_OK )
tLookupQueue.Push(iBin);
tProcessed++;
if ( ( tProcessed % 10000 )==0 )
{
tProgress.m_iDocids = tProcessed;
tProgress.Show ( );
}
}
if ( !tWriter.Finalize ( m_sLastError ) )
return false;
// clean up readers
dBins.Reset();
tProgress.m_iDocids = tProgress.m_iDocidsTotal;
tProgress.PhaseEnd();
return true;
}
bool CSphIndex_VLN::Build_SetupInplace ( SphOffset_t & iHitsGap, int iHitsMax, int iFdHits ) const
{
if ( !m_bInplaceSettings )
return true;
const int HIT_SIZE_AVG = 4;
const float HIT_BLOCK_FACTOR = 1.0f;
if ( m_iHitGap )
iHitsGap = (SphOffset_t) m_iHitGap;
else
iHitsGap = (SphOffset_t)( iHitsMax*HIT_BLOCK_FACTOR*HIT_SIZE_AVG );
iHitsGap = Max ( iHitsGap, 1 );
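	// illustration only: with the defaults above and iHitsMax==1048576, the estimated
	// gap is 1048576 * 1.0 * 4 = 4 MB of hitlist space reserved up front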
if ( !SeekAndWarn ( iFdHits, iHitsGap, "CSphIndex_VLN::Build" ))
return false;
return true;
}
void SetupDocstoreFields ( DocstoreAddField_i & tFields, const CSphSchema & tSchema )
{
int iStored = 0;
for ( int i = 0; i < tSchema.GetFieldsCount(); i++ )
if ( tSchema.IsFieldStored(i) )
{
tFields.AddField ( tSchema.GetFieldName(i), DOCSTORE_TEXT );
iStored++;
}
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
if ( tSchema.IsAttrStored(i) )
{
tFields.AddField ( tSchema.GetAttr(i).m_sName, DOCSTORE_ATTR );
iStored++;
}
assert(iStored);
}
bool CheckStoredFields ( const CSphSchema & tSchema, const CSphIndexSettings & tSettings, CSphString & sError )
{
for ( const auto & i : tSettings.m_dStoredFields )
if ( tSchema.GetAttr ( i.cstr() ) )
{
sError.SetSprintf ( "existing attribute specified in stored_fields: '%s'\n", i.cstr() );
return false;
}
for ( const auto & i : tSettings.m_dStoredOnlyFields )
if ( tSchema.GetAttr ( i.cstr() ) )
{
			sError.SetSprintf ( "existing attribute specified in stored_only_fields: '%s'\n", i.cstr() );
return false;
}
return true;
}
bool CSphIndex_VLN::Build_SetupDocstore ( std::unique_ptr<DocstoreBuilder_i> & pDocstore, CSphBitvec & dStoredFields, CSphBitvec & dStoredAttrs, CSphVector<CSphVector<BYTE>> & dTmpDocstoreFieldStorage, CSphVector<CSphVector<BYTE>> & dTmpDocstoreAttrStorage )
{
if ( !m_tSchema.HasStoredFields() && !m_tSchema.HasStoredAttrs() )
return true;
BuildBufferSettings_t tSettings; // use default buffer settings
auto pBuilder = CreateDocstoreBuilder ( GetFilename ( SPH_EXT_SPDS ), GetSettings(), tSettings.m_iBufferStorage, m_sLastError );
if ( !pBuilder )
return false;
SetupDocstoreFields ( *pBuilder, m_tSchema );
dStoredFields.Init ( m_tSchema.GetFieldsCount() );
dStoredAttrs.Init ( m_tSchema.GetAttrsCount() );
for ( int i = 0; i < m_tSchema.GetFieldsCount(); i++ )
if ( pBuilder->GetFieldId ( m_tSchema.GetFieldName(i), DOCSTORE_TEXT )!=-1 )
dStoredFields.BitSet(i);
for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
if ( pBuilder->GetFieldId ( m_tSchema.GetAttr(i).m_sName, DOCSTORE_ATTR )!=-1 )
dStoredAttrs.BitSet(i);
dTmpDocstoreFieldStorage.Resize ( m_tSchema.GetFieldsCount() );
dTmpDocstoreAttrStorage.Resize ( m_tSchema.GetAttrsCount() );
pDocstore = std::move ( pBuilder );
return true;
}
bool CSphIndex_VLN::Build_SetupBlobBuilder ( std::unique_ptr<BlobRowBuilder_i> & pBuilder )
{
if ( !m_tSchema.HasBlobAttrs() )
return true;
BuildBufferSettings_t tSettings; // use default buffer settings
pBuilder = sphCreateBlobRowBuilder ( m_tSchema, GetTmpFilename ( SPH_EXT_SPB ), m_tSettings.m_tBlobUpdateSpace, tSettings.m_iBufferAttributes, m_sLastError );
return !!pBuilder;
}
bool CSphIndex_VLN::Build_SetupColumnar ( std::unique_ptr<columnar::Builder_i> & pBuilder, CSphBitvec & tColumnarsAttrs )
{
if ( !m_tSchema.HasColumnarAttrs() )
return true;
for ( int i = 0; i < m_tSchema.GetAttrsCount(); i++ )
if ( m_tSchema.GetAttr(i).IsColumnar() )
tColumnarsAttrs.BitSet(i);
BuildBufferSettings_t tSettings; // use default buffer settings
pBuilder = CreateColumnarBuilder ( m_tSchema, GetTmpFilename ( SPH_EXT_SPC ), tSettings.m_iBufferColumnar, m_sLastError );
return !!pBuilder;
}
bool CSphIndex_VLN::Build_SetupSI ( std::unique_ptr<SI::Builder_i> & pSIBuilder, std::unique_ptr<JsonSIBuilder_i> & pJsonSIBuilder, CSphBitvec & tSIAttrs, int64_t iMemoryLimit )
{
if ( !IsSecondaryLibLoaded() )
return true;
BuildBufferSettings_t tSettings; // use default buffer settings
pSIBuilder = CreateIndexBuilder ( iMemoryLimit, m_tSchema, tSIAttrs, GetFilename ( SPH_EXT_SPIDX ), tSettings.m_iBufferStorage, m_sLastError );
if ( !pSIBuilder )
return false;
if ( m_tSchema.HasJsonSIAttrs() )
{
pJsonSIBuilder = CreateJsonSIBuilder ( m_tSchema, GetFilename(SPH_EXT_SPB), GetFilename(SPH_EXT_SPJIDX), m_sLastError );
if ( !pJsonSIBuilder )
return false;
}
return true;
}
static bool BuildSetupHistograms ( const ISphSchema & tSchema, std::unique_ptr<HistogramContainer_c> & pContainer, CSphVector<HistogramSource_t> & dHistograms )
{
const int MAX_HISTOGRAM_SIZE = 8192;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
Histogram_i * pHistogram = CreateHistogram ( tAttr.m_sName, tAttr.m_eAttrType, MAX_HISTOGRAM_SIZE ).release();
if ( pHistogram )
dHistograms.Add ( { pHistogram, i, tAttr.m_eAttrType } );
}
if ( !dHistograms.GetLength() )
return true;
pContainer = std::make_unique<HistogramContainer_c>();
for ( const auto & i : dHistograms )
Verify ( pContainer->Add ( std::unique_ptr<Histogram_i> { i.m_pHist } ) );
return true;
}
static VecTraits_T<const BYTE> GetAttrForDocstore ( DocID_t tDocID, int iAttr, const CSphSchema & tSchema, QueryMvaContainer_c & tMvaContainer, CSphSource & tSource, CSphVector<BYTE> & dTmpStorage )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(iAttr);
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_STRING:
{
const CSphString & sStrAttr = tSource.GetStrAttr(iAttr);
return { (const BYTE*)sStrAttr.cstr(), sStrAttr.Length() };
}
case SPH_ATTR_UINT32SET:
{
const CSphVector<int64_t> * pMva = FetchMVA ( tDocID, iAttr, tAttr, tMvaContainer, tSource, false );
dTmpStorage.Resize ( pMva->GetLength()*sizeof(DWORD) );
DWORD * pAttrs = (DWORD*)dTmpStorage.Begin();
for ( int iValue = 0; iValue < pMva->GetLength(); iValue++ )
pAttrs[iValue] = (DWORD)(*pMva)[iValue];
return dTmpStorage;
}
case SPH_ATTR_INT64SET:
{
const CSphVector<int64_t> * pMva = FetchMVA ( tDocID, iAttr, tAttr, tMvaContainer, tSource, false );
return { pMva ? (const BYTE*)pMva->Begin() : nullptr, pMva ? (int64_t)pMva->GetLengthBytes() : 0 };
}
case SPH_ATTR_BIGINT:
{
int64_t iValue = tSource.GetAttr(iAttr);
dTmpStorage.Resize ( sizeof(iValue) );
memcpy ( dTmpStorage.Begin(), &iValue, dTmpStorage.GetLength() );
return dTmpStorage;
}
default:
// assume 32-bit integer
uint32_t uValue = tSource.GetAttr(iAttr);
dTmpStorage.Resize ( sizeof(uValue) );
memcpy ( dTmpStorage.Begin(), &uValue, dTmpStorage.GetLength() );
return dTmpStorage;
}
}
static uint64_t CreateJoinedKey ( DocID_t tDocID, int iEntry )
{
uint64_t uRes = sphFNV64 ( &tDocID, sizeof(tDocID) );
return sphFNV64 ( &iEntry, sizeof(iEntry), uRes );
}
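// illustration only: the chained FNV64 above yields one distinct lookup key per
// (docid, joined-entry index) pair; Build_AddToDocstore below walks iEntry=0,1,2...
// until a key misses, thereby visiting every joined chunk of a document in order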
void CSphIndex_VLN::Build_AddToDocstore ( DocstoreBuilder_i * pDocstoreBuilder, DocID_t tDocID, QueryMvaContainer_c & tMvaContainer, CSphSource & tSource, const CSphBitvec & dStoredFields, const CSphBitvec & dStoredAttrs, CSphVector<CSphVector<BYTE>> & dTmpDocstoreFieldStorage, CSphVector<CSphVector<BYTE>> & dTmpDocstoreAttrStorage, const CSphVector<std::unique_ptr<OpenHashTable_T<uint64_t, uint64_t>>> & dJoinedOffsets, CSphReader & tJoinedReader )
{
if ( !pDocstoreBuilder )
return;
DocstoreBuilder_i::Doc_t tDoc;
tSource.GetDocFields ( tDoc.m_dFields );
assert ( tDoc.m_dFields.GetLength()==m_tSchema.GetFieldsCount() );
	// filter out non-stored fields (should already be null)
int iField = 0;
for ( int i = 0; i < dStoredFields.GetSize(); i++ )
{
if ( !dStoredFields.BitGet(i) )
tDoc.m_dFields.Remove(iField);
else
{
// override with joined fields that were already prefetched
if ( dJoinedOffsets.GetLength() && dJoinedOffsets[i] )
{
uint64_t * pOffset;
int iEntry = 0;
auto & dTmp = dTmpDocstoreFieldStorage[i];
dTmp.Resize(0);
while ( ( pOffset = dJoinedOffsets[i]->Find ( CreateJoinedKey ( tDocID, iEntry ) ) ) != nullptr )
{
tJoinedReader.SeekTo ( *pOffset, 0 );
tJoinedReader.UnzipOffset(); // docid
tJoinedReader.UnzipInt(); // joined field id
if ( m_tSchema.GetField(i).m_bPayload )
tJoinedReader.UnzipInt(); // payload
DWORD uLength = tJoinedReader.UnzipInt();
DWORD uOldFieldLength = dTmp.GetLength();
DWORD uSpaceOffset = uOldFieldLength ? 1 : 0;
DWORD uNewFieldLength = uOldFieldLength + uLength + uSpaceOffset;
dTmp.Resize(uNewFieldLength);
tJoinedReader.GetBytes ( &dTmp[uOldFieldLength+uSpaceOffset], uLength );
if ( uSpaceOffset )
dTmp[uOldFieldLength] = ' ';
iEntry++;
}
tDoc.m_dFields[iField] = dTmp;
}
iField++;
}
}
VecTraits_T<BYTE> * pAddedAttrs = tDoc.m_dFields.AddN ( dStoredAttrs.BitCount() );
int iAttr = 0;
for ( int i = 0; i < dStoredAttrs.GetSize(); i++ )
if ( dStoredAttrs.BitGet(i) )
pAddedAttrs[iAttr++] = GetAttrForDocstore ( tDocID, i, m_tSchema, tMvaContainer, tSource, dTmpDocstoreAttrStorage[i] );
pDocstoreBuilder->AddDoc ( tSource.m_tDocInfo.m_tRowID, tDoc );
}
int CSphIndex_VLN::Build ( const CSphVector<CSphSource*> & dSources, int iMemoryLimit, int iWriteBuffer, CSphIndexProgress& tProgress )
{
assert ( dSources.GetLength() );
CSphVector<SphWordID_t> dHitlessWords;
if ( !LoadHitlessWords ( m_tSettings.m_sHitlessFiles, m_pTokenizer, m_pDict, dHitlessWords, m_sLastError ) )
return 0;
// vars shared between phases
RawVector_T<CSphBin> dBins;
SphOffset_t iSharedOffset = -1;
m_pDict->HitblockBegin();
// setup sources
ARRAY_FOREACH ( iSource, dSources )
{
CSphSource * pSource = dSources[iSource];
assert ( pSource );
pSource->SetDict ( m_pDict );
pSource->Setup ( m_tSettings, nullptr );
}
// connect 1st source and fetch its schema
// and don't disconnect it because some sources can't survive connect/disconnect
CSphSource * pSource0 = dSources[0];
if ( !pSource0->Connect ( m_sLastError )
|| !pSource0->IterateStart ( m_sLastError )
|| !pSource0->UpdateSchema ( &m_tSchema, m_sLastError )
|| !pSource0->SetupMorphFields ( m_sLastError ) )
{
return 0;
}
if ( !CheckStoredFields ( m_tSchema, m_tSettings, m_sLastError ) )
return 0;
bool bHaveJoined = pSource0->HasJoinedFields();
bool bHaveQueryMVAs = false;
for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
if ( ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET ) && tAttr.m_eSrc!=SPH_ATTRSRC_FIELD )
{
bHaveQueryMVAs = true;
break;
}
}
// non-SQL sources don't survive connect+disconnect+reconnect
// and if we don't disconnect now we won't be able to fetch query MVAs from SQL sources
QueryMvaContainer_c tQueryMvaContainer;
if ( bHaveQueryMVAs )
{
pSource0->Disconnect();
// temporary storage for MVAs that we fetch from queries
// we might want to dump that to file later
if ( !Build_CollectQueryMvas ( dSources, tQueryMvaContainer ) )
return 0;
}
CSphAutofile tTmpJoinedFields ( GetFilename ( "tmp3" ), SPH_O_NEW, m_sLastError, true );
CSphVector<std::unique_ptr<OpenHashTable_T<uint64_t, uint64_t>>> dJoinedOffsets;
CSphReader tJoinedReader;
if ( bHaveJoined )
{
pSource0->Disconnect();
if ( !Build_CollectJoinedFields ( dSources, tTmpJoinedFields, dJoinedOffsets ) )
return 0;
tJoinedReader.SetFile(tTmpJoinedFields);
}
int iFieldLens = m_tSchema.GetAttrId_FirstFieldLen();
const CSphColumnInfo * pBlobLocatorAttr = m_tSchema.GetAttr ( sphGetBlobLocatorName() );
if ( !m_pTokenizer->SetFilterSchema ( m_tSchema, m_sLastError ) )
return 0;
int iHitBuilderBufferSize = ( iWriteBuffer>0 )
? Max ( iWriteBuffer, MIN_WRITE_BUFFER )
: DEFAULT_WRITE_BUFFER;
CSphHitBuilder tHitBuilder ( m_tSettings, dHitlessWords, false, iHitBuilderBufferSize, m_pDict, &m_sLastError, nullptr );
////////////////////////////////////////////////
// collect and partially sort hits
////////////////////////////////////////////////
CSphVector <DocID_t> dKillList;
// adjust memory requirements
int iOldLimit = iMemoryLimit;
// book at least 2 MB for keywords dict, if needed
int iDictSize = 0;
if ( m_pDict->GetSettings().m_bWordDict )
iDictSize = Max ( MIN_KEYWORDS_DICT, iMemoryLimit/8 );
// reserve for sorting docid-rowid pairs
int iDocidLookupSize = Max ( 32768, iMemoryLimit/16 );
iMemoryLimit -= iDocidLookupSize+iDictSize;
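// e.g. with mem_limit=256 MB: the keywords dict gets 32 MB, the docid lookup 16 MB,
// leaving 208 MB for the raw hits block (~13.6M hits, assuming a 16-byte CSphWordHit)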
// do we have enough left for hits?
int iHitsMax = 1048576;
if ( iMemoryLimit < iHitsMax*(int)sizeof(CSphWordHit) )
{
iMemoryLimit = iOldLimit + iHitsMax*sizeof(CSphWordHit) - iMemoryLimit;
sphWarn ( "collect_hits: mem_limit=%d kb too low, increasing to %d kb", iOldLimit/1024, iMemoryLimit/1024 );
} else
iHitsMax = iMemoryLimit / sizeof(CSphWordHit);
// allocate raw hits block
CSphFixedVector<CSphWordHit> dHits ( iHitsMax + MAX_SOURCE_HITS );
CSphWordHit * pHits = dHits.Begin();
CSphWordHit * pHitsMax = dHits.Begin() + iHitsMax;
int nDocidLookupsPerBlock = iDocidLookupSize/sizeof(DocidRowidPair_t);
int nDocidLookup = 0;
int nDocidLookupBlocks = 0;
CSphFixedVector<DocidRowidPair_t> dDocidLookup ( nDocidLookupsPerBlock );
// fallback blob source (for mva)
KeepAttrs_c tPrevAttrs ( tQueryMvaContainer );
const bool bGotPrevIndex = tPrevAttrs.Init ( m_sKeepAttrs, m_dKeepAttrs, m_tSchema );
// create temp files
CSphAutofile fdLock ( GetFilename ( "tmp0" ), SPH_O_NEW, m_sLastError, true );
CSphAutofile fdHits ( ( m_bInplaceSettings ? GetFilename ( SPH_EXT_SPP ) : GetFilename ( "tmp1" ) ), SPH_O_NEW, m_sLastError, true );
CSphAutofile fdTmpLookup ( GetFilename ( "tmp2" ), SPH_O_NEW, m_sLastError, true );
CSphWriter tWriterSPA;
bool bHaveNonColumnarAttrs = m_tSchema.HasNonColumnarAttrs();
// write to temp file because of possible --keep-attrs option which loads prev index
if ( bHaveNonColumnarAttrs && !tWriterSPA.OpenFile ( GetTmpFilename ( SPH_EXT_SPA ), m_sLastError ) )
return 0;
if ( fdLock.GetFD()<0 || fdHits.GetFD()<0 )
return 0;
std::unique_ptr<BlobRowBuilder_i> pBlobRowBuilder;
if ( !Build_SetupBlobBuilder(pBlobRowBuilder) )
return 0;
std::unique_ptr<columnar::Builder_i> pColumnarBuilder;
CSphBitvec tColumnarAttrs ( m_tSchema.GetAttrsCount() );
if ( !Build_SetupColumnar ( pColumnarBuilder, tColumnarAttrs ) )
return 0;
std::unique_ptr<SI::Builder_i> pSIBuilder;
std::unique_ptr<JsonSIBuilder_i> pJsonSIBuilder;
CSphBitvec tSIAttrs ( m_tSchema.GetAttrsCount() );
if ( !Build_SetupSI ( pSIBuilder, pJsonSIBuilder, tSIAttrs, iMemoryLimit ) )
return 0;
std::unique_ptr<DocstoreBuilder_i> pDocstoreBuilder;
CSphBitvec dStoredFields, dStoredAttrs;
CSphVector<CSphVector<BYTE>> dTmpDocstoreFieldStorage, dTmpDocstoreAttrStorage;
if ( !Build_SetupDocstore ( pDocstoreBuilder, dStoredFields, dStoredAttrs, dTmpDocstoreFieldStorage, dTmpDocstoreAttrStorage ) )
return 0;
std::unique_ptr<HistogramContainer_c> pHistogramContainer;
CSphVector<HistogramSource_t> dHistograms;
if ( !BuildSetupHistograms ( m_tSchema, pHistogramContainer, dHistograms ) )
return 0;
SphOffset_t iHitsGap = 0;
if ( !Build_SetupInplace ( iHitsGap, iHitsMax, fdHits.GetFD() ) )
return 0;
if ( !sphLockEx ( fdLock.GetFD(), false ) )
{
m_sLastError.SetSprintf ( "failed to lock '%s': another indexer running?", fdLock.GetFilename() );
return 0;
}
m_tStats.Reset ();
tProgress.PhaseBegin ( CSphIndexProgress::PHASE_COLLECT );
CSphVector<int> dHitBlocks;
dHitBlocks.Reserve ( 1024 );
AttrIndexBuilder_c tMinMax(m_tSchema);
RowID_t tRowID = 0;
int64_t iHitsTotal = 0;
ARRAY_FOREACH ( iSource, dSources )
{
// connect and check schema
CSphSource * pSource = dSources[iSource];
bool bNeedToConnect = iSource>0 || bHaveQueryMVAs || bHaveJoined;
if ( bNeedToConnect )
{
if ( !pSource->Connect ( m_sLastError )
|| !pSource->IterateStart ( m_sLastError )
|| !pSource->UpdateSchema ( &m_tSchema, m_sLastError )
|| !pSource->SetupMorphFields ( m_sLastError ) )
{
return 0;
}
}
// fallback blob source (for string and json )
if ( bGotPrevIndex )
tPrevAttrs.SetBlobSource ( pSource );
// fetch documents
for ( ;; )
{
// get next doc, and handle errors
bool bEOF = false;
if ( !pSource->IterateDocument ( bEOF, m_sLastError ) )
return 0;
// check if we have no more documents
if ( bEOF )
break;
pSource->m_tDocInfo.m_tRowID = tRowID++;
DocID_t tDocID = pSource->GetAttr(0);
pSource->RowIDAssigned ( tDocID, tRowID-1 );
bool bKeepRow = ( bGotPrevIndex && tPrevAttrs.Keep ( tDocID ) );
// show progress bar
if ( ( pSource->GetStats().m_iTotalDocuments % 1000 )==0 )
{
tProgress.m_iDocuments = m_tStats.m_iTotalDocuments + pSource->GetStats().m_iTotalDocuments;
tProgress.m_iBytes = m_tStats.m_iTotalBytes + pSource->GetStats().m_iTotalBytes;
tProgress.Show();
}
// update crashdump
g_iIndexerCurrentDocID = pSource->m_tDocInfo.m_tRowID;
g_iIndexerCurrentHits = pHits-dHits.Begin();
// store mva, strings and JSON blobs
if ( pBlobRowBuilder )
{
AttrSource_i * pBlobSource = pSource;
if ( bKeepRow )
pBlobSource = &tPrevAttrs;
std::pair<SphOffset_t,SphOffset_t> tOffsetSize = {0,0};
if ( !Build_StoreBlobAttrs ( tDocID, tOffsetSize, *pBlobRowBuilder, tQueryMvaContainer, *pBlobSource, bKeepRow ) )
return 0;
if ( pJsonSIBuilder )
pJsonSIBuilder->AddRowOffsetSize(tOffsetSize);
pSource->m_tDocInfo.SetAttr ( pBlobLocatorAttr->m_tLocator, tOffsetSize.first );
}
// store anything columnar
if ( pColumnarBuilder )
Builder_StoreAttrs ( m_tSchema, tDocID, *pSource, tQueryMvaContainer, pColumnarBuilder.get(), tColumnarAttrs );
if ( pSIBuilder )
{
pSIBuilder->SetRowID ( pSource->m_tDocInfo.m_tRowID );
Builder_StoreAttrs ( m_tSchema, tDocID, *pSource, tQueryMvaContainer, pSIBuilder.get(), tSIAttrs );
}
BuildStoreHistograms ( m_tSchema, tDocID, *pSource, tQueryMvaContainer, dHistograms );
// store hits
while ( const ISphHits * pDocHits = pSource->IterateHits ( m_sLastWarning ) )
{
int iDocHits = pDocHits->GetLength();
#if PARANOID
for ( int i=0; i<iDocHits; i++ )
{
assert ( pDocHits->m_dData[i].m_uDocID==pSource->m_tDocInfo.m_uDocID );
assert ( pDocHits->m_dData[i].m_uWordID );
assert ( pDocHits->m_dData[i].m_iWordPos );
}
#endif
assert ( ( pHits+iDocHits )<=( pHitsMax+MAX_SOURCE_HITS ) );
memcpy ( pHits, pDocHits->Begin(), iDocHits*sizeof(CSphWordHit) );
pHits += iDocHits;
// check if we need to flush
if ( pHits<pHitsMax && !( iDictSize && m_pDict->HitblockGetMemUse() > iDictSize ) )
continue;
// update crashdump
g_iIndexerPoolStartDocID = tDocID;
g_iIndexerPoolStartHit = pHits-dHits.Begin();
// sort hits
int iHits = int ( pHits - dHits.Begin() );
{
sphSort ( dHits.Begin(), iHits, CmpHit_fn() );
m_pDict->HitblockPatch ( dHits.Begin(), iHits );
}
pHits = dHits.Begin();
// flush hits, docs are flushed independently
dHitBlocks.Add ( tHitBuilder.cidxWriteRawVLB ( fdHits.GetFD(), dHits.Begin(), iHits ) );
m_pDict->HitblockReset ();
if ( dHitBlocks.Last()<0 )
return 0;
// progress bar
iHitsTotal += iHits;
tProgress.m_iDocuments = m_tStats.m_iTotalDocuments + pSource->GetStats().m_iTotalDocuments;
tProgress.m_iBytes = m_tStats.m_iTotalBytes + pSource->GetStats().m_iTotalBytes;
tProgress.Show();
}
// update total field lengths
if ( iFieldLens>=0 )
{
for ( int i=0; i < m_tSchema.GetFieldsCount(); i++ )
m_dFieldLens[i] += pSource->m_tDocInfo.GetAttr ( m_tSchema.GetAttr ( i+iFieldLens ).m_tLocator );
}
if ( bHaveNonColumnarAttrs )
{
// store docinfo
// with the advent of SPH_ATTR_TOKENCOUNT, now MUST be done AFTER iterating the hits
// because field lengths are computed during that iteration
const CSphRowitem * pRow = pSource->m_tDocInfo.m_pDynamic;
if ( bKeepRow )
pRow = tPrevAttrs.GetRow ( pSource->m_tDocInfo.m_pDynamic );
tMinMax.Collect ( pRow );
tWriterSPA.PutBytes ( pRow, sizeof(CSphRowitem)*m_tSchema.GetRowSize() );
}
dDocidLookup[nDocidLookup].m_tDocID = tDocID;
dDocidLookup[nDocidLookup].m_tRowID = pSource->m_tDocInfo.m_tRowID;
nDocidLookup++;
if ( nDocidLookup==dDocidLookup.GetLength() )
{
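// the in-memory lookup block is full: sort it and spill to the temp file;
// SortDocidLookup() later merge-sorts all spilled blocks into the final docid lookup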
dDocidLookup.Sort ( CmpDocidLookup_fn() );
if ( !sphWriteThrottled ( fdTmpLookup.GetFD (), &dDocidLookup[0], nDocidLookup*sizeof(DocidRowidPair_t), "temp_docid_lookup", m_sLastError ) )
return 0;
nDocidLookup = 0;
nDocidLookupBlocks++;
}
Build_AddToDocstore ( pDocstoreBuilder.get(), tDocID, tQueryMvaContainer, *pSource, dStoredFields, dStoredAttrs, dTmpDocstoreFieldStorage, dTmpDocstoreAttrStorage, dJoinedOffsets, tJoinedReader );
// go on, loop next document
}
// FIXME! uncontrolled memory usage; add checks and/or disk-based sort in the future?
if ( pSource->IterateKillListStart ( m_sLastError ) )
{
DocID_t tKilllistDocID;
while ( pSource->IterateKillListNext ( tKilllistDocID ) )
dKillList.Add ( tKilllistDocID );
} else if ( !m_sLastError.IsEmpty() )
return 0;
// fetch joined fields
if ( bHaveJoined )
{
// flush tail of regular hits
int iHits = int ( pHits - dHits.Begin() );
if ( iDictSize && m_pDict->HitblockGetMemUse() && iHits )
{
sphSort ( dHits.Begin(), iHits, CmpHit_fn() );
m_pDict->HitblockPatch ( dHits.Begin(), iHits );
pHits = dHits.Begin();
iHitsTotal += iHits;
dHitBlocks.Add ( tHitBuilder.cidxWriteRawVLB ( fdHits.GetFD(), dHits.Begin(), iHits ) );
if ( dHitBlocks.Last()<0 )
return 0;
m_pDict->HitblockReset ();
}
tJoinedReader.SeekTo(0,0);
while (true)
{
// get next doc, and handle errors
ISphHits * pJoinedHits = pSource->IterateJoinedHits ( tJoinedReader, m_sLastError );
if ( !pJoinedHits )
return 0;
// check for eof
if ( pSource->m_tDocInfo.m_tRowID==INVALID_ROWID )
break;
int iJoinedHits = pJoinedHits->GetLength();
memcpy ( pHits, pJoinedHits->Begin(), iJoinedHits*sizeof(CSphWordHit) );
pHits += iJoinedHits;
// check if we need to flush
if ( pHits<pHitsMax && !( iDictSize && m_pDict->HitblockGetMemUse() > iDictSize ) )
continue;
// store hits
int iStoredHits = int ( pHits - dHits.Begin() );
sphSort ( dHits.Begin(), iStoredHits, CmpHit_fn() );
m_pDict->HitblockPatch ( dHits.Begin(), iStoredHits );
pHits = dHits.Begin();
iHitsTotal += iStoredHits;
dHitBlocks.Add ( tHitBuilder.cidxWriteRawVLB ( fdHits.GetFD(), dHits.Begin(), iStoredHits ) );
if ( dHitBlocks.Last()<0 )
return 0;
m_pDict->HitblockReset ();
}
}
// this source is over, disconnect and update stats
pSource->Disconnect ();
m_tStats.m_iTotalDocuments += pSource->GetStats().m_iTotalDocuments;
m_tStats.m_iTotalBytes += pSource->GetStats().m_iTotalBytes;
}
if ( m_tStats.m_iTotalDocuments>=INT_MAX )
{
m_sLastError.SetSprintf ( "table over %d documents not supported (got documents count=" INT64_FMT ")", INT_MAX, m_tStats.m_iTotalDocuments );
return 0;
}
// flush last hit block
if ( pHits>dHits.Begin() )
{
int iHits = int ( pHits - dHits.Begin() );
{
sphSort ( dHits.Begin(), iHits, CmpHit_fn() );
m_pDict->HitblockPatch ( dHits.Begin(), iHits );
}
iHitsTotal += iHits;
dHitBlocks.Add ( tHitBuilder.cidxWriteRawVLB ( fdHits.GetFD(), dHits.Begin(), iHits ) );
m_pDict->HitblockReset ();
if ( dHitBlocks.Last()<0 )
return 0;
}
// reset hits pool
dHits.Reset(0);
if ( nDocidLookup )
{
dDocidLookup.Sort ( CmpDocidLookup_fn(), 0, nDocidLookup-1 );
if ( !sphWriteThrottled ( fdTmpLookup.GetFD(), &dDocidLookup[0], nDocidLookup*sizeof(DocidRowidPair_t), "temp_docid_lookup", m_sLastError ) )
return 0;
nDocidLookupBlocks++;
}
dDocidLookup.Reset(0);
tProgress.m_iDocuments = m_tStats.m_iTotalDocuments;
tProgress.m_iBytes = m_tStats.m_iTotalBytes;
tProgress.PhaseEnd();
if ( bHaveNonColumnarAttrs )
{
if ( m_tStats.m_iTotalDocuments )
{
tMinMax.FinishCollect();
const CSphTightVector<CSphRowitem> & dMinMaxRows = tMinMax.GetCollected();
tWriterSPA.PutBytes ( dMinMaxRows.Begin(), dMinMaxRows.GetLength()*sizeof(CSphRowitem) );
m_iDocinfoIndex = ( dMinMaxRows.GetLength() / m_tSchema.GetRowSize() / 2 ) - 1;
}
tWriterSPA.CloseFile();
if ( tWriterSPA.IsError() )
{
m_sLastError = "error writing .SPA";
return 0;
}
}
if ( pBlobRowBuilder && !pBlobRowBuilder->Done(m_sLastError) )
return 0;
std::string sError;
if ( pColumnarBuilder && !pColumnarBuilder->Done(sError) )
{
m_sLastError = sError.c_str();
return 0;
}
if ( pSIBuilder )
{
tProgress.PhaseBegin ( CSphIndexProgress::PHASE_SI_BUILD );
tProgress.Show();
bool bSiDone = pSIBuilder->Done(sError);
tProgress.PhaseEnd();
if ( !bSiDone )
{
m_sLastError = sError.c_str();
return 0;
}
}
if ( pHistogramContainer && !pHistogramContainer->Save ( GetFilename ( SPH_EXT_SPHI ), m_sLastError ) )
return 0;
if ( bGotPrevIndex )
tPrevAttrs.Reset();
CSphString sSPA = GetFilename ( SPH_EXT_SPA );
CSphString sSPATmp = GetTmpFilename ( SPH_EXT_SPA );
if ( bHaveNonColumnarAttrs && sph::rename ( sSPATmp.cstr(), sSPA.cstr() )!=0 )
{
m_sLastError.SetSprintf ( "failed to rename %s to %s", sSPATmp.cstr(), sSPA.cstr() );
return 0;
}
CSphString sSPB = GetFilename ( SPH_EXT_SPB );
CSphString sSPBTmp = GetTmpFilename ( SPH_EXT_SPB );
if ( m_tSchema.HasBlobAttrs() && sph::rename ( sSPBTmp.cstr(), sSPB.cstr() )!=0 )
{
m_sLastError.SetSprintf ( "failed to rename %s to %s", sSPBTmp.cstr(), sSPB.cstr() );
return 0;
}
CSphString sSPC = GetFilename ( SPH_EXT_SPC );
CSphString sSPCTmp = GetTmpFilename ( SPH_EXT_SPC );
if ( m_tSchema.HasColumnarAttrs() && sph::rename ( sSPCTmp.cstr(), sSPC.cstr() )!=0 )
{
m_sLastError.SetSprintf ( "failed to rename %s to %s", sSPCTmp.cstr(), sSPC.cstr() );
return 0;
}
if ( pJsonSIBuilder )
{
tProgress.PhaseBegin ( CSphIndexProgress::PHASE_JSONSI_BUILD );
tProgress.Show();
bool bSiDone = pJsonSIBuilder->Done(m_sLastError);
tProgress.PhaseEnd();
if ( !bSiDone )
return 0;
}
if ( !WriteDeadRowMap ( GetFilename ( SPH_EXT_SPM ), (DWORD)m_tStats.m_iTotalDocuments, m_sLastError ) )
return 0;
dKillList.Uniq();
if ( !WriteKillList ( GetFilename ( SPH_EXT_SPK ), dKillList.Begin(), dKillList.GetLength(), m_tSettings.m_tKlistTargets, m_sLastError ) )
return 0;
if ( pDocstoreBuilder )
pDocstoreBuilder->Finalize();
WarnAboutKillList ( dKillList, m_tSettings.m_tKlistTargets );
m_iMinMaxIndex = m_tStats.m_iTotalDocuments*m_tSchema.GetRowSize();
if ( !SortDocidLookup ( fdTmpLookup.GetFD(), nDocidLookupBlocks, iMemoryLimit, nDocidLookupsPerBlock, nDocidLookup, tProgress ) )
return 0;
///////////////////////////////////
// sort and write compressed index
///////////////////////////////////
// initialize readers
assert ( dBins.IsEmpty() );
dBins.Reserve_static ( dHitBlocks.GetLength() );
iSharedOffset = -1;
float fReadFactor = 1.0f;
int iRelocationSize = 0;
iWriteBuffer = iHitBuilderBufferSize;
if ( m_bInplaceSettings )
{
assert ( m_fRelocFactor > 0.005f && m_fRelocFactor < 0.95f );
assert ( m_fWriteFactor > 0.005f && m_fWriteFactor < 0.95f );
assert ( m_fWriteFactor+m_fRelocFactor < 1.0f );
fReadFactor -= m_fRelocFactor + m_fWriteFactor;
iRelocationSize = int ( iMemoryLimit * m_fRelocFactor );
iWriteBuffer = int ( iMemoryLimit * m_fWriteFactor );
}
int iBinSize = CSphBin::CalcBinSize ( int ( iMemoryLimit * fReadFactor ),
dHitBlocks.GetLength() + m_pDict->GetSettings().m_bWordDict, "sort_hits" );
CSphFixedVector <BYTE> dRelocationBuffer ( iRelocationSize );
iSharedOffset = -1;
ARRAY_FOREACH ( i, dHitBlocks )
{
dBins.Emplace_back ( m_tSettings.m_eHitless, m_pDict->GetSettings().m_bWordDict );
dBins[i].m_iFileLeft = dHitBlocks[i];
dBins[i].m_iFilePos = ( i==0 ) ? iHitsGap : dBins[i-1].m_iFilePos + dBins[i-1].m_iFileLeft;
dBins[i].Init ( fdHits.GetFD(), &iSharedOffset, iBinSize );
}
// if there were no hits, create zero-length index files
int iRawBlocks = dBins.GetLength();
//////////////////////////////
// create new index files set
//////////////////////////////
tHitBuilder.CreateIndexFiles ( GetFilename ( SPH_EXT_SPD ), GetFilename ( SPH_EXT_SPP ), GetFilename ( SPH_EXT_SPE ), m_bInplaceSettings, iWriteBuffer, fdHits, &iSharedOffset );
// dict files
CSphAutofile fdTmpDict ( GetFilename ( "tmp8" ), SPH_O_NEW, m_sLastError, true );
CSphAutofile fdDict ( GetFilename ( SPH_EXT_SPI ), SPH_O_NEW, m_sLastError, false );
if ( fdTmpDict.GetFD()<0 || fdDict.GetFD()<0 )
return 0;
m_pDict->DictBegin ( fdTmpDict, fdDict, iBinSize );
//////////////
// final sort
//////////////
if ( iRawBlocks )
{
int iLastBin = dBins.GetLength () - 1;
SphOffset_t iHitFileSize = dBins[iLastBin].m_iFilePos + dBins [iLastBin].m_iFileLeft;
CSphHitQueue tQueue ( iRawBlocks );
AggregateHit_t tHit;
// initialize hitlist encoder state
tHitBuilder.HitReset();
// initial fill
CSphFixedVector<BYTE> dActive ( iRawBlocks );
for ( int i=0; i<iRawBlocks; i++ )
{
if ( !dBins[i].ReadHit ( &tHit ) )
{
m_sLastError.SetSprintf ( "sort_hits: warmup failed (io error?)" );
return 0;
}
dActive[i] = ( tHit.m_uWordID!=0 );
if ( dActive[i] )
tQueue.Push ( tHit, i );
}
// init progress meter
tProgress.PhaseBegin ( CSphIndexProgress::PHASE_SORT );
assert ( !tProgress.m_iHits );
tProgress.m_iHitsTotal = iHitsTotal;
// while the queue has data for us
// FIXME! analyze binsRead return code
int iHitsSorted = 0;
int iMinBlock = -1;
while ( tQueue.m_iUsed )
{
int iBin = tQueue.m_pData->m_iBin;
// pack and emit queue root
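// in-place mode writes the final doclists into the same file the raw hit bins
// are being read from; before the writer can overrun the lowest unread bin,
// that bin's pending chunk is relocated towards the end of the file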
if ( m_bInplaceSettings )
{
if ( iMinBlock==-1 || dBins[iMinBlock].IsEOF () || !dActive[iMinBlock] )
{
iMinBlock = -1;
ARRAY_FOREACH ( i, dBins )
if ( !dBins[i].IsEOF () && dActive[i] && ( iMinBlock==-1 || dBins[i].m_iFilePos < dBins[iMinBlock].m_iFilePos ) )
iMinBlock = i;
}
int iToWriteMax = 3*sizeof(DWORD);
if ( iMinBlock!=-1 && ( tHitBuilder.GetHitfilePos() + iToWriteMax ) > dBins[iMinBlock].m_iFilePos )
{
if ( !RelocateBlock ( fdHits.GetFD (), dRelocationBuffer.Begin(), iRelocationSize, &iHitFileSize, dBins[iMinBlock], &iSharedOffset ) )
return 0;
iMinBlock = (iMinBlock+1) % dBins.GetLength ();
}
}
tHitBuilder.cidxHit ( tQueue.m_pData );
if ( tHitBuilder.IsError() )
return 0;
// pop queue root and push next hit from popped bin
tQueue.Pop ();
if ( dActive[iBin] )
{
dBins[iBin].ReadHit ( &tHit );
dActive[iBin] = ( tHit.m_uWordID!=0 );
if ( dActive[iBin] )
tQueue.Push ( tHit, iBin );
}
// progress
if ( ++iHitsSorted==1000000 )
{
tProgress.m_iHits += iHitsSorted;
tProgress.Show();
iHitsSorted = 0;
}
}
tProgress.m_iHits = tProgress.m_iHitsTotal; // sum might be less than total because of dupes!
tProgress.PhaseEnd();
dBins.Reset ();
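// emit a sentinel hit (wordid=0, empty position) so that cidxHit() finalizes
// the last word's doclist/hitlist and closes the keyword entry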
AggregateHit_t tFlush;
tFlush.m_tRowID = INVALID_ROWID;
tFlush.m_uWordID = 0;
tFlush.m_szKeyword = nullptr;
tFlush.m_iWordPos = EMPTY_HIT;
tFlush.m_dFieldMask.UnsetAll();
tHitBuilder.cidxHit ( &tFlush );
if ( m_bInplaceSettings )
{
tHitBuilder.CloseHitlist();
if ( !sphTruncate ( fdHits.GetFD () ) )
sphWarn ( "failed to truncate %s", fdHits.GetFilename() );
}
}
BuildHeader_t tBuildHeader;
WriteHeader_t tWriteHeader;
PrepareHeaders ( tBuildHeader, tWriteHeader, false );
tBuildHeader.m_iDocinfo = m_tStats.m_iTotalDocuments;
if ( !tHitBuilder.cidxDone ( iMemoryLimit, m_tSettings.m_iMinInfixLen, m_pTokenizer->GetMaxCodepointLength(), &tBuildHeader ) )
return 0;
dRelocationBuffer.Reset(0);
// we're done
if ( !IndexBuildDone ( tBuildHeader, tWriteHeader, GetFilename(SPH_EXT_SPH), m_sLastError ) )
return 0;
// when the party's over...
ARRAY_FOREACH ( i, dSources )
dSources[i]->PostIndex ();
if ( m_bInplaceSettings )
fdHits.SetPersistent();
return 1;
} // NOLINT function length
/////////////////////////////////////////////////////////////////////////////
// MERGER HELPERS
/////////////////////////////////////////////////////////////////////////////
BYTE sphDoclistHintPack ( SphOffset_t iDocs, SphOffset_t iLen )
{
// we won't really store a hint for small lists
if ( iDocs<DOCLIST_HINT_THRESH )
return 0;
// for bigger lists len/docs varies 4x-6x on test indexes
// so let's assume that 4x-8x should be enough for everybody
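// worked example: iDocs=1000, iLen=5500 -> iDelta = Min(Max(5500-4000,0),3999) = 1500,
// uHint = 64*1500/1000 = 96; the matching hint unpack can then estimate
// len ~= 4*docs + docs*hint/64 = 5500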
SphOffset_t iDelta = Min ( Max ( iLen-4*iDocs, 0 ), 4*iDocs-1 ); // len delta over 4x, clamped to [0x..4x) range
BYTE uHint = (BYTE)( 64*iDelta/iDocs ); // hint now must be in [0..256) range
while ( uHint<255 && ( iDocs*uHint/64 )<iDelta ) // roundoff (suddenly, my guru math skillz failed me)
uHint++;
return uHint;
}
// !COMMIT eliminate this, move to dict (or at least couple with CWordlist)
template<bool WORDDICT>
class CSphDictReader
{
public:
// current word
SphWordID_t m_uWordID = 0;
SphOffset_t m_iDoclistOffset = 0;
int m_iDocs = 0;
int m_iHits = 0;
bool m_bHasHitlist = true;
int m_iHint = 0;
private:
ESphHitless m_eHitless { SPH_HITLESS_NONE };
CSphAutoreader m_tMyReader;
CSphReader * m_pReader = nullptr;
SphOffset_t m_iMaxPos = 0;
int m_iSkiplistBlockSize = 0;
char m_sWord[MAX_KEYWORD_BYTES];
int m_iCheckpoint = 1;
public:
CSphDictReader ( int iSkiplistBlockSize )
: m_iSkiplistBlockSize ( iSkiplistBlockSize )
{
m_sWord[0] = '\0';
}
bool Setup ( const CSphString & sFilename, SphOffset_t iMaxPos, ESphHitless eHitless, CSphString & sError )
{
if ( !m_tMyReader.Open ( sFilename, sError ) )
return false;
Setup ( &m_tMyReader, iMaxPos, eHitless );
return true;
}
void Setup ( CSphReader * pReader, SphOffset_t iMaxPos, ESphHitless eHitless )
{
m_pReader = pReader;
m_pReader->SeekTo ( 1, READ_NO_SIZE_HINT );
m_iMaxPos = iMaxPos;
m_eHitless = eHitless;
m_sWord[0] = '\0';
m_iCheckpoint = 1;
}
bool Read()
{
assert ( m_iSkiplistBlockSize>0 );
if ( m_pReader->GetPos()>=m_iMaxPos )
return false;
// get leading value
SphWordID_t iWord0 = WORDDICT ? m_pReader->GetByte() : m_pReader->UnzipWordid();
if ( !iWord0 )
{
// handle checkpoint
m_iCheckpoint++;
m_pReader->UnzipOffset();
m_uWordID = 0;
m_iDoclistOffset = 0;
m_sWord[0] = '\0';
if ( m_pReader->GetPos()>=m_iMaxPos )
return false;
iWord0 = WORDDICT ? m_pReader->GetByte() : m_pReader->UnzipWordid(); // get next word
}
if ( !iWord0 )
return false; // some failure
// get word entry
if_const ( WORDDICT )
{
// unpack next word
// must be in sync with DictEnd()!
assert ( iWord0<=255 );
auto uPack = (BYTE) iWord0;
int iMatch, iDelta;
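// keyword entries are prefix-compressed: if the high bit is set, one byte packs
// both delta (bits 4..6, plus 1) and match (low 4 bits), e.g. 0xB5 -> reuse 5
// leading bytes of the previous word and append 4 new ones; otherwise delta is
// the low 7 bits and match comes in a separate byte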
if ( uPack & 0x80 )
{
iDelta = ( ( uPack>>4 ) & 7 ) + 1;
iMatch = uPack & 15;
} else
{
iDelta = uPack & 127;
iMatch = m_pReader->GetByte();
}
assert ( iMatch+iDelta<(int)sizeof(m_sWord)-1 );
assert ( iMatch<=(int)strlen(m_sWord) );
m_pReader->GetBytes ( m_sWord + iMatch, iDelta );
m_sWord [ iMatch+iDelta ] = '\0';
m_iDoclistOffset = m_pReader->UnzipOffset();
m_iDocs = m_pReader->UnzipInt();
m_iHits = m_pReader->UnzipInt();
m_iHint = 0;
if ( m_iDocs>=DOCLIST_HINT_THRESH )
m_iHint = m_pReader->GetByte();
if ( m_iDocs > m_iSkiplistBlockSize )
m_pReader->UnzipInt();
m_uWordID = (SphWordID_t) sphCRC32 ( GetWord() ); // set wordID for indexing
} else
{
m_uWordID += iWord0;
m_iDoclistOffset += m_pReader->UnzipOffset();
m_iDocs = m_pReader->UnzipInt();
m_iHits = m_pReader->UnzipInt();
if ( m_iDocs > m_iSkiplistBlockSize )
m_pReader->UnzipOffset();
}
m_bHasHitlist =
( m_eHitless==SPH_HITLESS_NONE ) ||
( m_eHitless==SPH_HITLESS_SOME && !( m_iDocs & HITLESS_DOC_FLAG ) );
m_iDocs = m_eHitless==SPH_HITLESS_SOME ? ( m_iDocs & HITLESS_DOC_MASK ) : m_iDocs;
return true; // FIXME? errorflag?
}
int CmpWord ( const CSphDictReader & tOther ) const
{
if_const ( WORDDICT )
return strcmp ( m_sWord, tOther.m_sWord );
return ( m_uWordID < tOther.m_uWordID ) ? -1 : ( m_uWordID == tOther.m_uWordID ? 0 : 1 );
}
BYTE * GetWord() const { return (BYTE *)const_cast<char*>(m_sWord); }
int GetCheckpoint() const { return m_iCheckpoint; }
};
std::unique_ptr<ISphFilter> CSphIndex_VLN::CreateMergeFilters ( const VecTraits_T<CSphFilterSettings> & dSettings ) const
{
CSphString sError, sWarning;
std::unique_ptr<ISphFilter> pResult;
CreateFilterContext_t tCtx;
tCtx.m_pMatchSchema = &m_tSchema;
tCtx.m_pIndexSchema = &m_tSchema;
tCtx.m_pBlobPool = m_tBlobAttrs.GetReadPtr();
for ( const auto& dSetting : dSettings )
pResult = sphJoinFilters ( std::move ( pResult ), sphCreateFilter ( dSetting, tCtx, sError, sWarning ) );
if ( pResult )
pResult->SetColumnar ( m_pColumnar.get() );
return pResult;
}
bool CheckDocsCount ( int64_t iDocs, CSphString & sError )
{
if ( iDocs<INT_MAX )
return true;
sError.SetSprintf ( "table over %d documents not supported (got " INT64_FMT " documents)", INT_MAX, iDocs );
return false;
}
namespace QwordIteration
{
template<typename QWORD>
inline void PrepareQword ( QWORD & tQword, const CSphDictReader<QWORD::is_worddict::value> & tReader ) //NOLINT
{
tQword.m_tDoc.m_tRowID = INVALID_ROWID;
tQword.m_iDocs = tReader.m_iDocs;
tQword.m_iHits = tReader.m_iHits;
tQword.m_bHasHitlist = tReader.m_bHasHitlist;
tQword.m_uHitPosition = 0;
tQword.m_iHitlistPos = 0;
if_const ( QWORD::is_worddict::value )
tQword.m_rdDoclist->SeekTo ( tReader.m_iDoclistOffset, tReader.m_iHint );
}
template<typename QWORD>
inline bool NextDocument ( QWORD & tQword, const CSphIndex_VLN * pSourceIndex )
{
while (true)
{
tQword.GetNextDoc();
if ( tQword.m_tDoc.m_tRowID==INVALID_ROWID )
return false;
tQword.SeekHitlist ( tQword.m_iHitlistPos );
return true;
}
}
template < typename QWORD >
inline bool NextDocument ( QWORD & tQword, const CSphIndex_VLN * pSourceIndex, const VecTraits_T<RowID_t> & dRows )
{
while (true)
{
tQword.GetNextDoc();
if ( tQword.m_tDoc.m_tRowID==INVALID_ROWID )
return false;
if ( dRows[tQword.m_tDoc.m_tRowID]==INVALID_ROWID )
continue;
tQword.SeekHitlist ( tQword.m_iHitlistPos );
return true;
}
}
template<typename QWORD>
inline void ConfigureQword ( QWORD & tQword, DataReaderFactory_c * pHits, DataReaderFactory_c * pDocs, int iDynamic )
{
tQword.SetHitReader ( pHits );
tQword.m_rdHitlist->SeekTo ( 1, READ_NO_SIZE_HINT );
tQword.SetDocReader ( pDocs );
tQword.m_rdDoclist->SeekTo ( 1, READ_NO_SIZE_HINT );
tQword.m_tDoc.Reset ( iDynamic );
}
}; // namespace QwordIteration
class CSphMerger
{
public:
explicit CSphMerger ( CSphHitBuilder * pHitBuilder )
: m_pHitBuilder ( pHitBuilder )
{}
template < typename QWORD >
inline void TransferData ( QWORD & tQword, SphWordID_t iWordID, const BYTE * szWord,
const CSphIndex_VLN * pSourceIndex, const VecTraits_T<RowID_t>& dRows,
MergeCb_c & tMonitor )
{
AggregateHit_t tHit;
tHit.m_uWordID = iWordID;
tHit.m_szKeyword = szWord;
tHit.m_dFieldMask.UnsetAll();
while ( QwordIteration::NextDocument ( tQword, pSourceIndex, dRows ) && !tMonitor.NeedStop() )
{
if ( tQword.m_bHasHitlist )
TransferHits ( tQword, tHit, dRows );
else
{
// convert to aggregate if there is no hit-list
tHit.m_tRowID = dRows[tQword.m_tDoc.m_tRowID];
tHit.m_dFieldMask = tQword.m_dQwordFields;
tHit.SetAggrCount ( tQword.m_uMatchHits );
m_pHitBuilder->cidxHit ( &tHit );
}
}
}
template < typename QWORD >
inline void TransferHits ( QWORD & tQword, AggregateHit_t & tHit, const VecTraits_T<RowID_t> & dRows )
{
assert ( tQword.m_bHasHitlist );
tHit.m_tRowID = dRows[tQword.m_tDoc.m_tRowID];
for ( Hitpos_t uHit = tQword.GetNextHit(); uHit!=EMPTY_HIT; uHit = tQword.GetNextHit() )
{
tHit.m_iWordPos = uHit;
m_pHitBuilder->cidxHit ( &tHit );
}
}
private:
CSphHitBuilder * m_pHitBuilder;
};
// QWORDDST, QWORDSRC = DiskIndexQword_c
template < typename QWORDDST, typename QWORDSRC >
bool CSphIndex_VLN::MergeWords ( const CSphIndex_VLN * pDstIndex, const CSphIndex_VLN * pSrcIndex, VecTraits_T<RowID_t> dDstRows, VecTraits_T<RowID_t> dSrcRows, CSphHitBuilder * pHitBuilder, CSphString & sError, CSphIndexProgress & tProgress )
{
auto& tMonitor = tProgress.GetMergeCb();
CSphAutofile tDummy;
pHitBuilder->CreateIndexFiles ( pDstIndex->GetTmpFilename ( SPH_EXT_SPD ), pDstIndex->GetTmpFilename ( SPH_EXT_SPP ), pDstIndex->GetTmpFilename ( SPH_EXT_SPE ), false, 0, tDummy );
	static_assert ( QWORDDST::is_worddict::value == QWORDSRC::is_worddict::value, "can't merge worddict with non-worddict" );
CSphDictReader<QWORDDST::is_worddict::value> tDstReader ( pDstIndex->GetSettings().m_iSkiplistBlockSize );
CSphDictReader<QWORDSRC::is_worddict::value> tSrcReader ( pSrcIndex->GetSettings().m_iSkiplistBlockSize );
/// compress mode: no true merge is wanted, just apply dead rows and the filter to a single index
bool bCompress = pDstIndex==pSrcIndex;
if ( !tDstReader.Setup ( pDstIndex->GetFilename ( SPH_EXT_SPI ), pDstIndex->m_tWordlist.GetWordsEnd(), pDstIndex->m_tSettings.m_eHitless, sError ) )
return false;
if ( !bCompress && !tSrcReader.Setup ( pSrcIndex->GetFilename ( SPH_EXT_SPI ), pSrcIndex->m_tWordlist.GetWordsEnd(), pSrcIndex->m_tSettings.m_eHitless, sError ) )
return false;
/// prepare for indexing
pHitBuilder->HitblockBegin();
pHitBuilder->HitReset();
/// setup qwords
QWORDDST tDstQword ( false, false, pDstIndex->GetIndexId() );
QWORDSRC tSrcQword ( false, false, pSrcIndex->GetIndexId() );
DataReaderFactoryPtr_c tSrcDocs {
NewProxyReader ( pSrcIndex->GetFilename ( SPH_EXT_SPD ), sError,
DataReaderFactory_c::DOCS, pSrcIndex->m_tMutableSettings.m_tFileAccess.m_iReadBufferDocList, FileAccess_e::FILE )
};
if ( !tSrcDocs )
return false;
DataReaderFactoryPtr_c tSrcHits {
NewProxyReader ( pSrcIndex->GetFilename ( SPH_EXT_SPP ), sError,
DataReaderFactory_c::HITS, pSrcIndex->m_tMutableSettings.m_tFileAccess.m_iReadBufferHitList, FileAccess_e::FILE )
};
if ( !tSrcHits )
return false;
if ( !sError.IsEmpty () || tMonitor.NeedStop () )
return false;
DataReaderFactoryPtr_c tDstDocs {
NewProxyReader ( pDstIndex->GetFilename ( SPH_EXT_SPD ), sError,
DataReaderFactory_c::DOCS, pDstIndex->m_tMutableSettings.m_tFileAccess.m_iReadBufferDocList, FileAccess_e::FILE )
};
if ( !tDstDocs )
return false;
DataReaderFactoryPtr_c tDstHits {
NewProxyReader ( pDstIndex->GetFilename ( SPH_EXT_SPP ), sError,
DataReaderFactory_c::HITS, pDstIndex->m_tMutableSettings.m_tFileAccess.m_iReadBufferHitList, FileAccess_e::FILE )
};
if ( !tDstHits )
return false;
if ( !sError.IsEmpty() || tMonitor.NeedStop () )
return false;
CSphMerger tMerger(pHitBuilder);
QwordIteration::ConfigureQword<QWORDDST> ( tDstQword, tDstHits, tDstDocs, pDstIndex->m_tSchema.GetDynamicSize() );
QwordIteration::ConfigureQword<QWORDSRC> ( tSrcQword, tSrcHits, tSrcDocs, pSrcIndex->m_tSchema.GetDynamicSize() );
/// merge
bool bDstWord = tDstReader.Read();
bool bSrcWord = !bCompress && tSrcReader.Read();
tProgress.PhaseBegin ( CSphIndexProgress::PHASE_MERGE );
tProgress.Show();
int iWords = 0;
int iHitlistsDiscarded = 0;
for ( ; bDstWord || bSrcWord; ++iWords )
{
if ( iWords==1000 )
{
tProgress.m_iWords += 1000;
tProgress.Show();
iWords = 0;
}
if ( tMonitor.NeedStop () || !sError.IsEmpty () )
return false;
const int iCmp = bCompress ? -1 : tDstReader.CmpWord ( tSrcReader );
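// iCmp<0: the dst word is smaller, copy it as-is; iCmp>0: the src word is smaller,
// copy that one; iCmp==0: same word in both, interleave its documents and hits.
// compress mode forces -1 so only the dst stream is consumed.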
if ( !bSrcWord || ( bDstWord && iCmp<0 ) )
{
// transfer documents and hits from destination
QwordIteration::PrepareQword<QWORDDST> ( tDstQword, tDstReader );
tMerger.TransferData<QWORDDST> ( tDstQword, tDstReader.m_uWordID, tDstReader.GetWord(), pDstIndex, dDstRows, tMonitor );
bDstWord = tDstReader.Read();
} else if ( !bDstWord || ( bSrcWord && iCmp>0 ) )
{
// transfer documents and hits from source
QwordIteration::PrepareQword<QWORDSRC> ( tSrcQword, tSrcReader );
tMerger.TransferData<QWORDSRC> ( tSrcQword, tSrcReader.m_uWordID, tSrcReader.GetWord(), pSrcIndex, dSrcRows, tMonitor );
bSrcWord = tSrcReader.Read();
} else // merge documents and hits inside the word
{
assert ( iCmp==0 );
bool bHitless = !tDstReader.m_bHasHitlist;
if ( tDstReader.m_bHasHitlist!=tSrcReader.m_bHasHitlist )
{
++iHitlistsDiscarded;
bHitless = true;
}
QwordIteration::PrepareQword<QWORDDST> ( tDstQword, tDstReader );
QwordIteration::PrepareQword<QWORDSRC> ( tSrcQword, tSrcReader );
AggregateHit_t tHit;
tHit.m_uWordID = tDstReader.m_uWordID; // !COMMIT m_sKeyword anyone?
tHit.m_szKeyword = tDstReader.GetWord();
tHit.m_dFieldMask.UnsetAll();
// we assume that all the duplicates have been removed
// and we don't need to merge hits from the same document
// transfer hits from destination
while ( QwordIteration::NextDocument ( tDstQword, pDstIndex, dDstRows ) )
{
if ( tMonitor.NeedStop () || !sError.IsEmpty () )
return false;
if ( bHitless )
{
while ( tDstQword.m_bHasHitlist && tDstQword.GetNextHit()!=EMPTY_HIT );
tHit.m_tRowID = dDstRows[tDstQword.m_tDoc.m_tRowID];
tHit.m_dFieldMask = tDstQword.m_dQwordFields;
tHit.SetAggrCount ( tDstQword.m_uMatchHits );
pHitBuilder->cidxHit ( &tHit );
} else
tMerger.TransferHits ( tDstQword, tHit, dDstRows );
}
// transfer hits from source
while ( QwordIteration::NextDocument ( tSrcQword, pSrcIndex, dSrcRows ) )
{
if ( tMonitor.NeedStop () || !sError.IsEmpty () )
return false;
if ( bHitless )
{
while ( tSrcQword.m_bHasHitlist && tSrcQword.GetNextHit()!=EMPTY_HIT );
tHit.m_tRowID = dSrcRows[tSrcQword.m_tDoc.m_tRowID];
tHit.m_dFieldMask = tSrcQword.m_dQwordFields;
tHit.SetAggrCount ( tSrcQword.m_uMatchHits );
pHitBuilder->cidxHit ( &tHit );
} else
tMerger.TransferHits ( tSrcQword, tHit, dSrcRows );
}
// next word
bDstWord = tDstReader.Read();
bSrcWord = tSrcReader.Read();
}
}
tProgress.m_iWords += iWords;
tProgress.Show();
if ( iHitlistsDiscarded )
sphWarning ( "discarded hitlists for %u words", iHitlistsDiscarded );
return true;
}
// called only from indexer
bool CSphIndex_VLN::Merge ( CSphIndex * pSource, const VecTraits_T<CSphFilterSettings> & dFilters, bool bSupressDstDocids, CSphIndexProgress& tProgress )
{
StrVec_t dWarnings;
ResetFileAccess ( this );
ResetFileAccess ( pSource );
if ( !Prealloc ( false, nullptr, dWarnings ) )
return false;
Preread();
if ( !pSource->Prealloc ( false, nullptr, dWarnings ) )
{
m_sLastError.SetSprintf ( "source table preload failed: %s", pSource->GetLastError().cstr() );
return false;
}
pSource->Preread();
for ( const auto & i : dWarnings )
sphWarn ( "%s", i.cstr() );
// create filters
std::unique_ptr<ISphFilter> pFilter =CreateMergeFilters ( dFilters );
return DoMerge ( this, (const CSphIndex_VLN *)pSource, pFilter.get(), m_sLastError, tProgress, false, bSupressDstDocids );
}
std::pair<DWORD,DWORD> CSphIndex_VLN::CreateRowMapsAndCountTotalDocs ( const CSphIndex_VLN* pSrcIndex, const CSphIndex_VLN* pDstIndex, CSphFixedVector<RowID_t>& dSrcRowMap, CSphFixedVector<RowID_t>& dDstRowMap, const ISphFilter* pFilter, bool bSupressDstDocids, MergeCb_c& tMonitor )
{
TRACE_CORO ( "sph", "CSphIndex_VLN::CreateRowMapsAndCountTotalDocs" );
if ( pSrcIndex!=pDstIndex )
dSrcRowMap.Reset ( pSrcIndex->m_iDocinfo );
dDstRowMap.Reset ( pDstIndex->m_iDocinfo );
int iStride = pDstIndex->m_tSchema.GetRowSize();
DeadRowMap_Ram_c tExtraDeadMap(0);
if ( bSupressDstDocids ) // skip monitoring suppressions as they're used _only_ from the indexer call and never from flavours of optimize.
{
tExtraDeadMap.Reset ( dDstRowMap.GetLength() );
LookupReaderIterator_c tDstLookupReader ( pDstIndex->m_tDocidLookup.GetReadPtr() );
LookupReaderIterator_c tSrcLookupReader ( pSrcIndex->m_tDocidLookup.GetReadPtr() );
KillByLookup ( tDstLookupReader, tSrcLookupReader, tExtraDeadMap );
}
dSrcRowMap.Fill ( INVALID_ROWID );
dDstRowMap.Fill ( INVALID_ROWID );
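// build old-rowid -> new-rowid maps: surviving dst rows are numbered first,
// then surviving src rows; INVALID_ROWID marks rows that are dead, killed
// by docid suppression, or rejected by the merge filter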
const DWORD * pRow = pDstIndex->m_tAttr.GetReadPtr();
int64_t iTotalDocs = 0;
std::pair<DWORD, DWORD> tPerIndexDocs {0,0};
// say to observer we're going to collect alive rows from dst index
// (kills directed to that index must be collected to reapply at the finish)
BEGIN_CORO ( "sph", "collect dst rowmap");
tMonitor.SetEvent ( MergeCb_c::E_COLLECT_START, pDstIndex->m_iChunk );
for ( RowID_t i = 0; i < dDstRowMap.GetLength(); ++i, pRow+=iStride )
{
if ( pDstIndex->m_tDeadRowMap.IsSet(i) )
continue;
if ( bSupressDstDocids && tExtraDeadMap.IsSet ( i ) )
continue;
if ( pFilter && !pFilter->Eval ( { i, pRow } ) )
continue;
dDstRowMap[i] = (RowID_t)iTotalDocs++;
}
tMonitor.SetEvent ( MergeCb_c::E_COLLECT_FINISHED, pDstIndex->m_iChunk );
END_CORO ( "sph" );
tPerIndexDocs.first = (DWORD)iTotalDocs;
if ( dSrcRowMap.IsEmpty() )
return tPerIndexDocs;
// say to observer we're going to collect alive rows from src index (again, issue to kills).
BEGIN_CORO ( "sph", "collect src rowmap" );
tMonitor.SetEvent ( MergeCb_c::E_COLLECT_START, pSrcIndex->m_iChunk );
for ( int i = 0; i < dSrcRowMap.GetLength(); ++i )
{
if ( pSrcIndex->m_tDeadRowMap.IsSet(i) )
continue;
dSrcRowMap[i] = (RowID_t)iTotalDocs++;
}
tMonitor.SetEvent ( MergeCb_c::E_COLLECT_FINISHED, pSrcIndex->m_iChunk );
tPerIndexDocs.second = DWORD ( iTotalDocs - tPerIndexDocs.first );
END_CORO ( "sph" );
return tPerIndexDocs;
}
bool CSphIndex_VLN::DoMerge ( const CSphIndex_VLN * pDstIndex, const CSphIndex_VLN * pSrcIndex, const ISphFilter * pFilter, CSphString & sError, CSphIndexProgress & tProgress, bool bSrcSettings, bool bSupressDstDocids )
{
TRACE_CORO ( "sph", "CSphIndex_VLN::DoMerge" );
auto & tMonitor = tProgress.GetMergeCb();
assert ( pDstIndex && pSrcIndex );
/// 'merge with self' - only apply filters/kill-lists, no real merge
bool bCompress = pSrcIndex==pDstIndex;
const CSphSchema & tDstSchema = pDstIndex->m_tSchema;
const CSphSchema & tSrcSchema = pSrcIndex->m_tSchema;
if ( !bCompress && !tDstSchema.CompareTo ( tSrcSchema, sError ) )
return false;
if ( !bCompress && pDstIndex->m_tSettings.m_eHitless!=pSrcIndex->m_tSettings.m_eHitless )
{
sError = "hitless settings must be the same on merged tables";
return false;
}
if ( !bCompress && pDstIndex->m_pDict->GetSettings().m_bWordDict!=pSrcIndex->m_pDict->GetSettings().m_bWordDict )
{
sError.SetSprintf ( "dictionary types must be the same (dst dict=%s, src dict=%s )",
pDstIndex->m_pDict->GetSettings().m_bWordDict ? "keywords" : "crc",
pSrcIndex->m_pDict->GetSettings().m_bWordDict ? "keywords" : "crc" );
return false;
}
// Create global list of documents to be merged from both sources
// here we can also quickly consider to reject whole merge if N of final docs is >4G
CSphFixedVector<RowID_t> dSrcRows{0}, dDstRows{0};
auto tTotalDocs = CreateRowMapsAndCountTotalDocs ( pSrcIndex, pDstIndex, dSrcRows, dDstRows, pFilter, bSupressDstDocids, tMonitor );
int64_t iTotalDocs = (int64_t)tTotalDocs.first + (int64_t)tTotalDocs.second;
if ( iTotalDocs >= INVALID_ROWID )
return false; // too many docs in merged segment (>4G even with filtered/killed), abort.
BuildHeader_t tBuildHeader ( pDstIndex->m_tStats );
// merging attributes is separate complete stage; files are finally prepared after it.
// however, if interrupt is requested after that stage - we need list of the files
// to gracefully unlink them.
StrVec_t dDeleteOnInterrupt;
// unlink prepared attribute files on exit, if any
AT_SCOPE_EXIT ( [&dDeleteOnInterrupt]
{
dDeleteOnInterrupt.for_each ( [] ( const auto & sFile )
{
if ( !sFile.IsEmpty() && sphFileExists ( sFile.cstr() ) )
::unlink ( sFile.cstr() );
} );
});
// merging attributes
{
AttrMerger_c tAttrMerger { tMonitor, sError, iTotalDocs, g_tMergeSettings };
if ( !tAttrMerger.Prepare ( pSrcIndex, pDstIndex ) )
return false;
if ( !tAttrMerger.CopyAttributes ( *pDstIndex, dDstRows, tTotalDocs.first ) )
return false;
if ( !bCompress && !tAttrMerger.CopyAttributes ( *pSrcIndex, dSrcRows, tTotalDocs.second ) )
return false;
if ( !tAttrMerger.FinishMergeAttributes ( pDstIndex, tBuildHeader, &dDeleteOnInterrupt ) )
return false;
}
const CSphIndex_VLN* pSettings = ( bSrcSettings ? pSrcIndex : pDstIndex );
CSphAutofile tDict ( pDstIndex->GetTmpFilename ( SPH_EXT_SPI ), SPH_O_NEW, sError, true );
if ( !sError.IsEmpty() || tDict.GetFD()<0 || tMonitor.NeedStop() )
return false;
DictRefPtr_c pDict { pSettings->m_pDict->Clone() };
CSphVector<SphWordID_t> dDummy;
CSphHitBuilder tHitBuilder ( pSettings->m_tSettings, dDummy, true, g_tMergeSettings.m_iBufferDict, pDict, &sError, &dDeleteOnInterrupt );
int iInfixCodepointBytes = 0;
if ( pSettings->m_tSettings.m_iMinInfixLen > 0 && pDict->GetSettings().m_bWordDict )
iInfixCodepointBytes = pSettings->m_pTokenizer->GetMaxCodepointLength();
// FIXME? is this magic dict block constant any good?..
pDict->SortedDictBegin ( tDict, g_tMergeSettings.m_iBufferDict, iInfixCodepointBytes );
BEGIN_CORO ( "sph", "merge dicts, doclists and hitlists" );
// merge dictionaries, doclists and hitlists
if ( pDict->GetSettings().m_bWordDict )
{
WITH_QWORD ( pDstIndex, false, QwordDst,
WITH_QWORD ( pSrcIndex, false, QwordSrc,
if ( !CSphIndex_VLN::MergeWords < QwordDst, QwordSrc > ( pDstIndex, pSrcIndex, dDstRows, dSrcRows, &tHitBuilder, sError, tProgress ) )
return false;
));
} else
{
WITH_QWORD ( pDstIndex, true, QwordDst,
WITH_QWORD ( pSrcIndex, true, QwordSrc,
if ( !CSphIndex_VLN::MergeWords < QwordDst, QwordSrc > ( pDstIndex, pSrcIndex, dDstRows, dSrcRows, &tHitBuilder, sError, tProgress ) )
return false;
));
}
END_CORO ( "sph" );
if ( tMonitor.NeedStop () || !sError.IsEmpty() )
return false;
// finalize
AggregateHit_t tFlush;
tFlush.m_tRowID = INVALID_ROWID;
tFlush.m_uWordID = 0;
tFlush.m_szKeyword = (const BYTE*)""; // tricky: assertion in cidxHit calls strcmp on this in case of empty index!
tFlush.m_iWordPos = EMPTY_HIT;
tFlush.m_dFieldMask.UnsetAll();
tHitBuilder.cidxHit ( &tFlush );
int iMinInfixLen = pSettings->m_tSettings.m_iMinInfixLen;
if ( !tHitBuilder.cidxDone ( g_tMergeSettings.m_iBufferDict, iMinInfixLen, pSettings->m_pTokenizer->GetMaxCodepointLength(), &tBuildHeader ) )
return false;
WriteHeader_t tWriteHeader;
tWriteHeader.m_pSettings = &pSettings->m_tSettings;
tWriteHeader.m_pSchema = &pSettings->m_tSchema;
tWriteHeader.m_pTokenizer = pSettings->m_pTokenizer;
tWriteHeader.m_pDict = pSettings->m_pDict;
tWriteHeader.m_pFieldFilter = pSettings->m_pFieldFilter.get();
tWriteHeader.m_pFieldLens = pSettings->m_dFieldLens.Begin();
IndexBuildDone ( tBuildHeader, tWriteHeader, pDstIndex->GetTmpFilename ( SPH_EXT_SPH ), sError );
// we're done; clean all deferred deletes
tDict.SetPersistent();
dDeleteOnInterrupt.Reset();
return true;
}
bool sphMerge ( const CSphIndex * pDst, const CSphIndex * pSrc, VecTraits_T<CSphFilterSettings> dFilters, CSphIndexProgress & tProgress, CSphString& sError )
{
TRACE_CORO ( "sph", "sphMerge" );
auto pDstIndex = (const CSphIndex_VLN*) pDst;
auto pSrcIndex = (const CSphIndex_VLN*) pSrc;
std::unique_ptr<ISphFilter> pFilter = pDstIndex->CreateMergeFilters ( dFilters );
return CSphIndex_VLN::DoMerge ( pDstIndex, pSrcIndex, pFilter.get(), sError, tProgress, dFilters.IsEmpty(), false );
}
template < typename QWORD >
bool CSphIndex_VLN::DeleteField ( const CSphIndex_VLN * pIndex, CSphHitBuilder * pHitBuilder, CSphString & sError, CSphSourceStats & tStat, int iKillField )
{
assert ( iKillField>=0 );
CSphAutofile tDummy;
pHitBuilder->CreateIndexFiles ( pIndex->GetTmpFilename ( SPH_EXT_SPD ), pIndex->GetTmpFilename ( SPH_EXT_SPP ), pIndex->GetTmpFilename ( SPH_EXT_SPE ), false, 0, tDummy );
CSphDictReader<QWORD::is_worddict::value> tWordsReader ( pIndex->GetSettings().m_iSkiplistBlockSize );
if ( !tWordsReader.Setup ( pIndex->GetFilename ( SPH_EXT_SPI ), pIndex->m_tWordlist.GetWordsEnd(), pIndex->m_tSettings.m_eHitless, sError ) )
return false;
/// prepare for indexing
pHitBuilder->HitblockBegin();
pHitBuilder->HitReset();
/// setup qword
QWORD tQword ( false, false, pIndex->GetIndexId() );
DataReaderFactoryPtr_c tDocs {
NewProxyReader ( pIndex->GetFilename ( SPH_EXT_SPD ), sError,
DataReaderFactory_c::DOCS, pIndex->m_tMutableSettings.m_tFileAccess.m_iReadBufferDocList, FileAccess_e::FILE )
};
if ( !tDocs )
return false;
DataReaderFactoryPtr_c tHits {
NewProxyReader ( pIndex->GetFilename ( SPH_EXT_SPP ), sError,
DataReaderFactory_c::HITS, pIndex->m_tMutableSettings.m_tFileAccess.m_iReadBufferHitList, FileAccess_e::FILE )
};
if ( !tHits )
return false;
if ( !sError.IsEmpty () || sphInterrupted () )
return false;
QwordIteration::ConfigureQword ( tQword, tHits, tDocs, pIndex->m_tSchema.GetDynamicSize() );
/// process
while ( tWordsReader.Read () )
{
if ( sphInterrupted () )
return false;
bool bHitless = !tWordsReader.m_bHasHitlist;
QwordIteration::PrepareQword ( tQword, tWordsReader );
AggregateHit_t tHit;
tHit.m_uWordID = tWordsReader.m_uWordID; // !COMMIT m_sKeyword anyone?
tHit.m_szKeyword = tWordsReader.GetWord();
tHit.m_dFieldMask.UnsetAll();
// transfer hits
while ( QwordIteration::NextDocument ( tQword, pIndex ) )
{
if ( sphInterrupted () )
return false;
tHit.m_tRowID = tQword.m_tDoc.m_tRowID;
if ( bHitless )
{
tHit.m_dFieldMask = tQword.m_dQwordFields; // fixme! what field mask on hitless? maybe write 0 here?
tHit.m_dFieldMask.DeleteBit (iKillField);
if ( tHit.m_dFieldMask.TestAll ( false ) )
continue;
tHit.SetAggrCount ( tQword.m_uMatchHits );
pHitBuilder->cidxHit ( &tHit );
} else
{
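// re-emit every hit except those in the deleted field; hits in higher
// fields get their field id decremented so field numbering stays dense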
assert ( tQword.m_bHasHitlist );
for ( Hitpos_t uHit = tQword.GetNextHit(); uHit!=EMPTY_HIT; uHit = tQword.GetNextHit() )
{
int iField = HITMAN::GetField ( uHit );
if ( iKillField==iField )
continue;
if ( iField>iKillField )
HITMAN::DecrementField ( uHit );
tHit.m_iWordPos = uHit;
pHitBuilder->cidxHit ( &tHit );
}
}
}
}
return true;
}
bool CSphIndex_VLN::DeleteFieldFromDict ( int iFieldId, BuildHeader_t & tBuildHeader, CSphString & sError )
{
CSphAutofile tNewDict ( GetTmpFilename ( SPH_EXT_SPI ), SPH_O_NEW, sError );
if ( !sError.IsEmpty () || tNewDict.GetFD ()<0 || sphInterrupted () )
return false;
DictRefPtr_c pDict { m_pDict->Clone () };
int iHitBufferSize = 8 * 1024 * 1024;
CSphVector<SphWordID_t> dDummy;
CSphHitBuilder tHitBuilder ( m_tSettings, dDummy, true, iHitBufferSize, pDict, &sError, nullptr );
// FIXME? is this magic dict block constant any good?..
int iInfixCodepointBytes = 0;
if ( m_tSettings.m_iMinInfixLen > 0 && pDict->GetSettings().m_bWordDict )
iInfixCodepointBytes = m_pTokenizer->GetMaxCodepointLength();
pDict->SortedDictBegin ( tNewDict, iHitBufferSize, iInfixCodepointBytes );
// merge dictionaries, doclists and hitlists
if ( pDict->GetSettings().m_bWordDict )
{
WITH_QWORD ( this, false, Qword,
if ( !CSphIndex_VLN::DeleteField <Qword> ( this, &tHitBuilder, sError, tBuildHeader, iFieldId ) )
return false;
);
} else
{
WITH_QWORD ( this, true, Qword,
if ( !CSphIndex_VLN::DeleteField <Qword> ( this, &tHitBuilder, sError, tBuildHeader, iFieldId ) )
return false;
);
}
if ( sphInterrupted() )
return false;
// finalize
AggregateHit_t tFlush;
tFlush.m_tRowID = INVALID_ROWID;
tFlush.m_uWordID = 0;
tFlush.m_szKeyword = (const BYTE*)""; // tricky: assertion in cidxHit calls strcmp on this in case of empty index!
tFlush.m_iWordPos = EMPTY_HIT;
tFlush.m_dFieldMask.UnsetAll();
tHitBuilder.cidxHit ( &tFlush );
int iMinInfixLen = m_tSettings.m_iMinInfixLen;
if ( !tHitBuilder.cidxDone ( iHitBufferSize, iMinInfixLen, m_pTokenizer->GetMaxCodepointLength(), &tBuildHeader ) )
return false;
/// as index is w-locked, we can also detach doclist/hitlist/dictionary and juggle them.
tNewDict.Close();
m_tWordlist.Reset();
if ( !JuggleFile ( SPH_EXT_SPI, sError ) ) return false;
m_tWordlist.m_iDictCheckpointsOffset= tBuildHeader.m_iDictCheckpointsOffset;
m_tWordlist.m_iDictCheckpoints = tBuildHeader.m_iDictCheckpoints;
m_tWordlist.m_iInfixCodepointBytes = tBuildHeader.m_iInfixCodepointBytes;
m_tWordlist.m_iInfixBlocksOffset = tBuildHeader.m_iInfixBlocksOffset;
m_tWordlist.m_iInfixBlocksWordsSize = tBuildHeader.m_iInfixBlocksWordsSize;
m_tWordlist.m_dCheckpoints.Reset ( m_tWordlist.m_iDictCheckpoints );
if ( !PreallocWordlist() ) return false;
m_tSkiplists.Reset ();
if ( !JuggleFile ( SPH_EXT_SPE, sError ) ) return false;
if ( !PreallocSkiplist() ) return false;
m_pDoclistFile = nullptr;
m_pHitlistFile = nullptr;
if ( !JuggleFile ( SPH_EXT_SPD, sError ) ) return false;
if ( !JuggleFile ( SPH_EXT_SPP, sError ) ) return false;
if ( !SpawnReaders() ) return false;
return true;
}
bool CSphIndex_VLN::AddRemoveFromDocstore ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, CSphString & sError )
{
int iOldNumStored = 0;
for ( int i = 0; i < tOldSchema.GetFieldsCount(); i++ )
if ( tOldSchema.IsFieldStored(i) )
iOldNumStored++;
for ( int i = 0; i < tOldSchema.GetAttrsCount(); i++ )
if ( tOldSchema.IsAttrStored(i) )
iOldNumStored++;
int iNewNumStored = 0;
for ( int i = 0; i < tNewSchema.GetFieldsCount(); i++ )
if ( tNewSchema.IsFieldStored(i) )
iNewNumStored++;
for ( int i = 0; i < tNewSchema.GetAttrsCount(); i++ )
if ( tNewSchema.IsAttrStored(i) )
iNewNumStored++;
if ( iOldNumStored==iNewNumStored )
return true;
std::unique_ptr<DocstoreBuilder_i> pDocstoreBuilder;
if ( iNewNumStored )
{
DocstoreSettings_t tDefault;
const DocstoreSettings_t & tDocstoreSettings = m_pDocstore ? m_pDocstore->GetDocstoreSettings () : tDefault;
BuildBufferSettings_t tSettings; // use default buffer settings
pDocstoreBuilder = CreateDocstoreBuilder ( GetTmpFilename ( SPH_EXT_SPDS ), tDocstoreSettings, tSettings.m_iBufferStorage, sError );
if ( !pDocstoreBuilder )
return false;
Alter_AddRemoveFromDocstore ( *pDocstoreBuilder, m_pDocstore.get(), (DWORD)m_iDocinfo, tNewSchema );
}
if ( !JuggleFile ( SPH_EXT_SPDS, sError, !!iOldNumStored, !!iNewNumStored ) )
return false;
m_pDocstore.reset();
PreallocDocstore();
return true;
}
bool CSphIndex_VLN::AddRemoveField ( bool bAddField, const CSphString & sFieldName, DWORD uFieldFlags, CSphString & sError )
{
CSphSchema tOldSchema = m_tSchema;
CSphSchema tNewSchema = m_tSchema;
if ( !Alter_AddRemoveFieldFromSchema ( bAddField, tNewSchema, sFieldName, uFieldFlags, sError ) )
return false;
auto iRemoveIdx = m_tSchema.GetFieldIndex ( sFieldName.cstr () );
m_tSchema = tNewSchema;
BuildHeader_t tBuildHeader;
WriteHeader_t tWriteHeader;
PrepareHeaders ( tBuildHeader, tWriteHeader );
if ( !bAddField && !DeleteFieldFromDict ( iRemoveIdx, tBuildHeader, sError ) )
return false;
if ( !AddRemoveFromDocstore ( tOldSchema, tNewSchema, sError ) )
return false;
tWriteHeader.m_pSchema = &tNewSchema;
// save the header
if ( !IndexBuildDone ( tBuildHeader, tWriteHeader, GetTmpFilename ( SPH_EXT_SPH ), sError ) )
return false;
return JuggleFile ( SPH_EXT_SPH, sError );
}
/////////////////////////////////////////////////////////////////////////////
void CSphMatchComparatorState::FixupLocators ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema, bool bRemapKeyparts )
{
for ( int i = 0; i < CSphMatchComparatorState::MAX_ATTRS; ++i )
{
sphFixupLocator ( m_tLocator[i], pOldSchema, pNewSchema );
// update string keypart into str_ptr
if ( bRemapKeyparts && m_eKeypart[i]==SPH_KEYPART_STRING )
m_eKeypart[i] = SPH_KEYPART_STRINGPTR;
// update columnar attrs
if ( pOldSchema )
{
int iOldAttrId = m_dAttrs[i];
if ( iOldAttrId!=-1 )
m_dAttrs[i] = pNewSchema->GetAttrIndex ( pOldSchema->GetAttr(iOldAttrId).m_sName.cstr() );
}
}
}
//////////////////////////////////////////////////////////////////////////
inline bool sphGroupMatch ( SphAttr_t iGroup, const SphAttr_t * pGroups, int iGroups )
{
if ( !pGroups ) return true;
const SphAttr_t * pA = pGroups;
const SphAttr_t * pB = pGroups+iGroups-1;
if ( iGroup==*pA || iGroup==*pB ) return true;
if ( iGroup<(*pA) || iGroup>(*pB) ) return false;
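// binary search over the sorted group list, keeping the invariant *pA < iGroup < *pB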
while ( pB-pA>1 )
{
const SphAttr_t * pM = pA + ((pB-pA)/2);
if ( iGroup==(*pM) )
return true;
if ( iGroup<(*pM) )
pB = pM;
else
pA = pM;
}
return false;
}
bool CSphIndex_VLN::EarlyReject ( CSphQueryContext * pCtx, CSphMatch & tMatch ) const
{
tMatch.m_pStatic = GetDocinfoByRowID ( tMatch.m_tRowID );
pCtx->CalcFilter ( tMatch );
if ( !pCtx->m_pFilter )
return false;
if ( !pCtx->m_pFilter->Eval ( tMatch ) )
{
pCtx->FreeDataFilter ( tMatch );
return true;
}
return false;
}
CSphVector<SphAttr_t> CSphIndex_VLN::BuildDocList () const
{
TlsMsg::ResetErr(); // clean err
CSphVector<SphAttr_t> dResult;
if ( !m_iDocinfo )
return dResult;
// new[] might fail on 32bit here
int64_t iSizeMax = (size_t)m_iDocinfo;
if ( iSizeMax!=m_iDocinfo )
{
TlsMsg::Err ( "doc-list build size_t overflow (docs count=%l, size max=%l)", m_iDocinfo, iSizeMax );
return dResult;
}
int iStride = m_tSchema.GetRowSize();
dResult.Resize ( m_iDocinfo );
const CSphRowitem * pRow = m_tAttr.GetReadPtr();
for ( SphAttr_t & tDst : dResult )
{
tDst = sphGetDocID ( pRow );
pRow += iStride;
}
dResult.Uniq();
return dResult;
}
RowID_t CSphIndex_VLN::GetRowidByDocid ( DocID_t tDocID ) const
{
return m_tLookupReader.Find ( tDocID );
}
int CSphIndex_VLN::Kill ( DocID_t tDocID )
{
// FIXME! docid might not be unique
if ( m_tDeadRowMap.Set ( GetRowidByDocid ( tDocID ) ) )
{
m_uAttrsStatus |= IndexSegment_c::ATTRS_ROWMAP_UPDATED;
KillHook ( tDocID );
return 1;
}
return 0;
}
bool CSphIndex_VLN::IsAlive ( DocID_t tDocID ) const
{
RowID_t tRow = GetRowidByDocid ( tDocID );
if ( tRow==INVALID_ROWID )
return false;
return ( !m_tDeadRowMap.IsSet ( tRow ) );
}
inline const CSphRowitem * CSphIndex_VLN::FindDocinfo ( DocID_t tDocID ) const
{
RowID_t tRowID = GetRowidByDocid ( tDocID );
return tRowID==INVALID_ROWID ? nullptr : GetDocinfoByRowID ( tRowID );
}
inline const CSphRowitem * CSphIndex_VLN::GetDocinfoByRowID ( RowID_t tRowID ) const
{
// GetCachedRowSize() is used to avoid several virtual calls
return m_tAttr.GetReadPtr() + (int64_t)tRowID*m_tSchema.GetCachedRowSize();
}
inline RowID_t CSphIndex_VLN::GetRowIDByDocinfo ( const CSphRowitem * pDocinfo ) const
{
return RowID_t ( ( pDocinfo - m_tAttr.GetReadPtr() ) / m_tSchema.GetCachedRowSize() );
}
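// each template flag bakes one optional feature (kill-list checks, weight
// randomization, ranker factors, sort/filter calcs, cutoff) into a dedicated
// specialization, so the hot match loop carries no runtime branches for
// features the query doesn't use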
template<bool USE_KLIST, bool RANDOMIZE, bool USE_FACTORS, bool HAS_SORT_CALC, bool HAS_WEIGHT_FILTER, bool HAS_FILTER_CALC, bool HAS_CUTOFF>
void CSphIndex_VLN::MatchExtended ( CSphQueryContext& tCtx, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dSorters, ISphRanker * pRanker, int iTag, int iIndexWeight, int iCutoff ) const
{
if ( !iCutoff )
return;
QueryProfile_c * pProfile = tCtx.m_pProfile;
CSphScopedProfile tProf (pProfile, SPH_QSTATE_UNKNOWN);
if constexpr ( USE_FACTORS )
pRanker->ExtraData ( EXTRA_SET_MATCHTAG, (void**)&iTag );
// do searching
CSphMatch * pMatch = pRanker->GetMatchesBuffer();
while (true)
{
// ranker does profile switches internally in GetMatches()
int iMatches = pRanker->GetMatches();
if ( iMatches<=0 )
break;
SwitchProfile ( pProfile, SPH_QSTATE_SORT );
for ( int i=0; i<iMatches; i++ )
{
CSphMatch & tMatch = pMatch[i];
if constexpr ( USE_KLIST )
{
if ( m_tDeadRowMap.IsSet ( tMatch.m_tRowID ) )
continue;
}
tMatch.m_iWeight *= iIndexWeight;
if constexpr ( HAS_SORT_CALC )
tCtx.CalcSort ( tMatch );
if constexpr ( HAS_WEIGHT_FILTER )
{
if ( tCtx.m_pWeightFilter && !tCtx.m_pWeightFilter->Eval ( tMatch ) )
{
if constexpr ( HAS_SORT_CALC )
tCtx.FreeDataSort ( tMatch );
continue;
}
}
tMatch.m_iTag = iTag;
bool bRand = false;
bool bNewMatch = false;
for ( ISphMatchSorter * pSorter: dSorters )
{
// all non-random sorters are in the beginning,
// so we can avoid the simple 'first-element' assertion
if constexpr ( RANDOMIZE )
{
if ( !bRand && pSorter->IsRandom() )
{
bRand = true;
tMatch.m_iWeight = ( sphRand() & 0xffff ) * iIndexWeight;
if constexpr ( HAS_WEIGHT_FILTER )
{
if ( tCtx.m_pWeightFilter && !tCtx.m_pWeightFilter->Eval ( tMatch ) )
break;
}
}
}
bNewMatch |= pSorter->Push ( tMatch );
if constexpr ( USE_FACTORS )
{
RowTagged_t tJustPushed = pSorter->GetJustPushed();
VecTraits_T<RowTagged_t> dJustPopped = pSorter->GetJustPopped();
pRanker->ExtraData ( EXTRA_SET_MATCHPUSHED, (void**)&tJustPushed );
pRanker->ExtraData ( EXTRA_SET_MATCHPOPPED, (void**)&dJustPopped );
}
}
if constexpr ( HAS_FILTER_CALC )
tCtx.FreeDataFilter ( tMatch );
if constexpr ( HAS_SORT_CALC )
tCtx.FreeDataSort ( tMatch );
if constexpr ( HAS_CUTOFF )
{
if ( bNewMatch && --iCutoff==0 )
break;
}
}
if constexpr ( HAS_CUTOFF )
{
if ( !iCutoff )
break;
}
}
}
//////////////////////////////////////////////////////////////////////////
struct SphFinalMatchCalc_t final : MatchProcessor_i, ISphNoncopyable
{
const CSphQueryContext & m_tCtx;
int m_iTag;
SphFinalMatchCalc_t ( int iTag, const CSphQueryContext & tCtx )
: m_tCtx ( tCtx )
, m_iTag ( iTag )
{}
bool ProcessInRowIdOrder() const final
{
// columnar expressions don't like random access; they are optimized for sequential access.
// that's why, if we have a columnar expression, we need to process matches in ascending RowID order
return m_tCtx.m_dCalcFinal.any_of ( []( const ContextCalcItem_t & i ){ return i.m_pExpr && i.m_pExpr->IsColumnar(); } );
}
void Process ( CSphMatch * pMatch ) final
{
// fixme! tag is a signed int, but distributed tags from remotes are set
// with | 0x80000000, i.e. in terms of signed int they're <0!
// Is that intended, or a bug? If intended, let's uniformly use either <0
// or &0x80000000 checks to avoid confusion. If it's a bug, the damage is already done!
if ( pMatch->m_iTag>=0 )
return;
m_tCtx.CalcFinal ( *pMatch );
pMatch->m_iTag = m_iTag;
}
void Process ( VecTraits_T<CSphMatch *> & dMatches ) final
{
CSphVector<ContextCalcItem_t *> dColumnWise, dRowWise;
// process columnar items in column-wise order (and the rest in rowwise order)
for ( auto & i : m_tCtx.m_dCalcFinal )
if ( i.m_pExpr->IsColumnar() )
dColumnWise.Add(&i);
else
dRowWise.Add(&i);
for ( const auto & pItem : dColumnWise )
for ( auto & pMatch : dMatches )
{
assert(pMatch);
if ( pMatch->m_iTag>=0 )
continue;
m_tCtx.CalcItem ( *pMatch, *pItem );
}
for ( auto & pMatch : dMatches )
for ( const auto & pItem : dRowWise )
{
assert(pMatch);
if ( pMatch->m_iTag>=0 )
continue;
m_tCtx.CalcItem ( *pMatch, *pItem );
}
for ( auto & pMatch : dMatches )
{
assert(pMatch);
if ( pMatch->m_iTag>=0 )
continue;
pMatch->m_iTag = m_iTag;
}
}
};
/// scoped worker scheduling helper
/// makes the quantum of the current task smaller
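/// a minimal usage sketch (mirroring MultiScan below):
///   std::optional<ScopedLowPriority_c> tPrio;
///   if ( tQuery.m_bLowPriority )
///       tPrio.emplace(); // the scheduling quantum stays shrunk until tPrio goes out of scope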
class ScopedLowPriority_c : public ISphNonCopyMovable
{
int64_t m_iStoredThrottlingPeriodUS;
public:
ScopedLowPriority_c()
: m_iStoredThrottlingPeriodUS { Threads::Coro::GetThrottlingPeriodUS() }
{
if ( m_iStoredThrottlingPeriodUS > 0 )
Threads::Coro::SetThrottlingPeriodUS ( Max ( m_iStoredThrottlingPeriodUS / g_iLowPriorityDivisor, 1 ) );
}
~ScopedLowPriority_c()
{
if ( m_iStoredThrottlingPeriodUS > 0 )
Threads::Coro::SetThrottlingPeriodUS ( m_iStoredThrottlingPeriodUS );
}
};
//////////////////////////////////////////////////////////////////////////
// this iterator does not support cutoff;
// otherwise tIterator.WasCutoffHit() at the end of Fullscan would stop iterating blocks once block 0 is fully scanned (!m_iRowsLeft),
// and for a small cutoff the iterator would scan only up to cutoff rows in each block
template <bool HAVE_DEAD>
class RowIterator_T : public ISphNoncopyable
{
public:
RowIterator_T ( const RowIdBoundaries_t & tBoundaries, const DeadRowMap_Disk_c & tDeadRowMap )
: m_tRowID ( tBoundaries.m_tMinRowID )
, m_tBoundaries ( tBoundaries )
, m_tDeadRowMap ( tDeadRowMap )
{}
FORCE_INLINE bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock );
DWORD GetNumProcessed() const { return m_tRowID-m_tBoundaries.m_tMinRowID; }
bool WasCutoffHit() const { return false; }
private:
static const int MAX_COLLECTED = 128;
RowID_t m_tRowID {INVALID_ROWID};
RowIdBoundaries_t m_tBoundaries;
CSphFixedVector<RowID_t> m_dCollected {MAX_COLLECTED}; // store 128 values (same as .spa attr block size)
const DeadRowMap_Disk_c & m_tDeadRowMap;
};
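// the two specializations below differ only in dead-row handling:
// HAVE_DEAD=true collects only the rows not marked in the dead-row map,
// HAVE_DEAD=false emits the whole contiguous range in one pass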
template <>
bool RowIterator_T<true>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
RowID_t * pRowIdStart = m_dCollected.Begin();
RowID_t * pRowIdMax = pRowIdStart + m_dCollected.GetLength();
RowID_t * pRowID = pRowIdStart;
while ( pRowID<pRowIdMax && m_tRowID<=m_tBoundaries.m_tMaxRowID )
{
if ( !m_tDeadRowMap.IsSet(m_tRowID) )
*pRowID++ = m_tRowID;
m_tRowID++;
}
return ReturnIteratorResult ( pRowID, pRowIdStart, dRowIdBlock );
}
template <>
bool RowIterator_T<false>::GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
RowID_t * pRowIdStart = m_dCollected.Begin();
int64_t iDelta = Min ( RowID_t(m_dCollected.GetLength()), int64_t(m_tBoundaries.m_tMaxRowID)-m_tRowID+1 );
assert ( iDelta>=0 );
RowID_t * pRowIdMax = pRowIdStart + iDelta;
RowID_t * pRowID = pRowIdStart;
// fixme! use sse?
while ( pRowID<pRowIdMax )
*pRowID++ = m_tRowID++;
return ReturnIteratorResult ( pRowID, pRowIdStart, dRowIdBlock );
}
// adds killlist filtering to a rowid iterator
class RowIteratorAlive_c : public ISphNoncopyable
{
public:
RowIteratorAlive_c ( RowidIterator_i * pIterator, const DeadRowMap_Disk_c & tDeadRowMap )
: m_pIterator ( pIterator )
, m_tDeadRowMap ( tDeadRowMap )
{
assert(pIterator);
}
FORCE_INLINE bool GetNextRowIdBlock ( RowIdBlock_t & dRowIdBlock )
{
RowIdBlock_t dIteratorRowIDs;
if ( !m_pIterator->GetNextRowIdBlock(dIteratorRowIDs) )
return false;
m_dCollected.Resize ( dIteratorRowIDs.GetLength() );
RowID_t * pRowIdStart = m_dCollected.Begin();
RowID_t * pRowID = pRowIdStart;
for ( auto i : dIteratorRowIDs )
{
if ( !m_tDeadRowMap.IsSet(i) )
*pRowID++ = i;
}
dRowIdBlock = RowIdBlock_t ( pRowIdStart, pRowID - pRowIdStart );
return true; // always return true, even if all values were filtered out. next call will fetch more values
}
DWORD GetNumProcessed() const { return (DWORD)m_pIterator->GetNumProcessed(); }
bool WasCutoffHit() const { return m_pIterator->WasCutoffHit(); }
private:
RowidIterator_i * m_pIterator;
CSphVector<RowID_t> m_dCollected {0};
const DeadRowMap_Disk_c & m_tDeadRowMap;
};
//////////////////////////////////////////////////////////////////////////
template <bool SINGLE_SORTER, bool HAS_FILTER_CALC, bool HAS_SORT_CALC, bool HAS_FILTER, bool HAS_RANDOMIZE, bool HAS_MAX_TIMER, bool HAS_CUTOFF, typename ITERATOR, typename TO_STATIC>
bool Fullscan ( ITERATOR & tIterator, TO_STATIC && fnToStatic, const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, int iIndexWeight, int64_t tmMaxTimer )
{
auto tScopedStats = AtScopeExit ( [&tMeta, &tIterator]{tMeta.m_tStats.m_iFetchedDocs = (DWORD)tIterator.GetNumProcessed(); } );
RowIdBlock_t dRowIDs;
Threads::Coro::HighFreqChecker_c fnHeavyCheck;
const int64_t& iCheckTimePoint { Threads::Coro::GetNextTimePointUS() };
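// note: fnHeavyCheck() below rate-limits the relatively expensive time checks;
// once the coroutine's time slice is over, the scan either stops (if the session
// was killed) or yields via RescheduleAndKeepCrashQuery() and resumes later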
while ( tIterator.GetNextRowIdBlock(dRowIDs) )
{
for ( auto i : dRowIDs )
{
tMatch.m_tRowID = i;
tMatch.m_pStatic = fnToStatic(i);
// early filter only (no late filters in full-scan because of no @weight)
if constexpr ( HAS_FILTER_CALC )
tCtx.CalcFilter(tMatch);
if constexpr ( HAS_FILTER )
{
if ( !tCtx.m_pFilter->Eval(tMatch) )
{
if constexpr ( HAS_FILTER_CALC )
tCtx.FreeDataFilter ( tMatch );
continue;
}
}
if constexpr ( HAS_RANDOMIZE )
tMatch.m_iWeight = ( sphRand() & 0xffff ) * iIndexWeight;
if constexpr ( HAS_SORT_CALC )
tCtx.CalcSort(tMatch);
bool bNewMatch = false;
if constexpr ( SINGLE_SORTER )
bNewMatch = dSorters[0]->Push(tMatch);
else
dSorters.for_each( [&tMatch, &bNewMatch] ( ISphMatchSorter * p ) { bNewMatch |= p->Push ( tMatch ); } );
// stringptr expressions should be duplicated (or taken over) at this point
if constexpr ( HAS_FILTER_CALC )
tCtx.FreeDataFilter ( tMatch );
if constexpr ( HAS_SORT_CALC )
tCtx.FreeDataSort ( tMatch );
if constexpr ( HAS_CUTOFF )
{
if ( bNewMatch && --iCutoff==0 )
return true;
}
}
// handle timer
if constexpr ( HAS_MAX_TIMER )
{
if ( sph::TimeExceeded ( tmMaxTimer ) )
{
tMeta.m_sWarning = "query time exceeded max_query_time";
return true;
}
}
if ( fnHeavyCheck() && sph::TimeExceeded ( iCheckTimePoint ) )
{
if ( session::GetKilled() )
{
tMeta.m_sWarning = "query was killed";
return true;
}
Threads::Coro::RescheduleAndKeepCrashQuery();
}
}
return tIterator.WasCutoffHit();
}
template <typename ITERATOR, typename TO_STATIC>
bool RunFullscan ( ITERATOR & tIterator, TO_STATIC && fnToStatic, const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *>& dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer )
{
bool bHasFilterCalc = !tCtx.m_dCalcFilter.IsEmpty();
bool bHasSortCalc = !tCtx.m_dCalcSort.IsEmpty();
bool bHasFilter = !!tCtx.m_pFilter;
bool bHasTimer = tmMaxTimer>0;
bool bHasCutoff = iCutoff!=-1;
// when we have a join query with multiple sorters, the first sorter does all the work (including pushing to all the other sorters),
// so we can avoid pushing to the other sorters to improve performance
bool bSingleSorter = dSorters.GetLength()==1 || dSorters[0]->IsJoin();
int iIndex = bSingleSorter*64 + bHasFilterCalc*32 + bHasSortCalc*16 + bHasFilter*8 + bRandomize*4 + bHasTimer*2 + bHasCutoff;
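// dispatch note: each flag occupies one bit of iIndex, so e.g. a single sorter (64)
// with a plain filter (8) and a cutoff (1) yields case 73, which instantiates
// Fullscan<true, false, false, true, false, false, true, ...>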
switch ( iIndex )
{
#define DECL_FNSCAN( _, n, params ) case n: return Fullscan<!!(n&64), !!(n&32), !!(n&16), !!(n&8), !!(n&4), !!(n&2), !!(n&1), ITERATOR, TO_STATIC> params;
BOOST_PP_REPEAT ( 128, DECL_FNSCAN, ( tIterator, std::forward<TO_STATIC> ( fnToStatic ), tCtx, tMeta, dSorters, tMatch, iCutoff, iIndexWeight, tmMaxTimer ) )
#undef DECL_FNSCAN
default:
assert ( 0 && "Internal error" );
break;
}
return false;
}
bool CSphIndex_VLN::RunFullscanOnAttrs ( const RowIdBoundaries_t & tBoundaries, const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer ) const
{
const CSphRowitem * pStart = m_tAttr.GetReadPtr();
int iStride = m_tSchema.GetRowSize();
auto fnToStatic = [pStart, iStride]( RowID_t tRowID ){ return pStart+(int64_t)tRowID*iStride; };
if ( m_tDeadRowMap.HasDead() )
{
RowIterator_T<true> tIt ( tBoundaries, m_tDeadRowMap );
return RunFullscan ( tIt, fnToStatic, tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, iIndexWeight, tmMaxTimer );
}
else
{
RowIterator_T<false> tIt ( tBoundaries, m_tDeadRowMap );
return RunFullscan ( tIt, fnToStatic, tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, iIndexWeight, tmMaxTimer );
}
}
bool CSphIndex_VLN::RunFullscanOnIterator ( RowidIterator_i * pIterator, const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer ) const
{
const CSphRowitem * pStart = m_tAttr.GetReadPtr();
int iStride = m_tSchema.GetRowSize();
auto fnToStatic = [pStart, iStride]( RowID_t tRowID ){ return pStart+(int64_t)tRowID*iStride; };
if ( m_tDeadRowMap.HasDead() )
{
RowIteratorAlive_c tIt ( pIterator, m_tDeadRowMap );
return RunFullscan ( tIt, fnToStatic, tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, iIndexWeight, tmMaxTimer );
}
return RunFullscan ( *pIterator, fnToStatic, tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, iIndexWeight, tmMaxTimer );
}
template <bool ROWID_LIMITS>
bool CSphIndex_VLN::ScanByBlocks ( const CSphQueryContext & tCtx, CSphQueryResultMeta & tMeta, const VecTraits_T<ISphMatchSorter *> & dSorters, CSphMatch & tMatch, int iCutoff, bool bRandomize, int iIndexWeight, int64_t tmMaxTimer, const RowIdBoundaries_t * pBoundaries ) const
{
int iStartIndexEntry = 0;
int iEndIndexEntry = (int)m_iDocinfoIndex;
if constexpr ( ROWID_LIMITS )
{
assert(pBoundaries);
iStartIndexEntry = pBoundaries->m_tMinRowID / DOCINFO_INDEX_FREQ;
iEndIndexEntry = pBoundaries->m_tMaxRowID / DOCINFO_INDEX_FREQ + 1;
}
int iStride = m_tSchema.GetRowSize();
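// each docinfo index entry covers DOCINFO_INDEX_FREQ rows and stores two rows
// (per-block min and max attribute values, iStride DWORDs each) that EvalBlock
// uses to reject whole blocks before any per-row filtering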
for ( int64_t iIndexEntry=iStartIndexEntry; iIndexEntry!=iEndIndexEntry; iIndexEntry++ )
{
// block-level filtering
const DWORD * pMin = &m_pDocinfoIndex[ iIndexEntry*iStride*2 ];
const DWORD * pMax = pMin + iStride;
if ( tCtx.m_pFilter && !tCtx.m_pFilter->EvalBlock ( pMin, pMax ) )
continue;
RowIdBoundaries_t tBlockBoundaries;
tBlockBoundaries.m_tMinRowID = RowID_t ( iIndexEntry*DOCINFO_INDEX_FREQ );
tBlockBoundaries.m_tMaxRowID = (RowID_t)Min ( ( iIndexEntry+1 )*DOCINFO_INDEX_FREQ, m_iDocinfo ) - 1;
if constexpr ( ROWID_LIMITS )
{
// clamp block start/end to match limits. need this only for first/last blocks
if ( iIndexEntry==iStartIndexEntry || iIndexEntry==iEndIndexEntry-1 )
{
assert(pBoundaries);
tBlockBoundaries.m_tMinRowID = Max ( tBlockBoundaries.m_tMinRowID, pBoundaries->m_tMinRowID );
tBlockBoundaries.m_tMaxRowID = Min ( tBlockBoundaries.m_tMaxRowID, pBoundaries->m_tMaxRowID );
}
}
if ( RunFullscanOnAttrs ( tBlockBoundaries, tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, iIndexWeight, tmMaxTimer ) )
return true;
}
return false;
}
RowIteratorsWithEstimates_t CSphIndex_VLN::CreateColumnarAnalyzerOrPrefilter ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, const CSphVector<FilterTreeItem_t> & dFilterTree, const ISphFilter * pFilter, ESphCollation eCollation, const ISphSchema & tSchema, CSphString & sWarning ) const
{
if ( !m_pColumnar || dFilterTree.GetLength() || !pFilter )
return {};
std::vector<common::Filter_t> dColumnarFilters;
std::vector<int> dFilterMap;
ToColumnarFilters ( dFilters, dColumnarFilters, dFilterMap, tSchema, eCollation, sWarning );
// remove disabled analyzers
std::vector<int> dToDelete;
for ( size_t i = 0; i < dFilterMap.size(); i++ )
{
if ( dFilterMap[i]==-1 )
continue;
if ( dSIInfo[i].m_eType!=SecondaryIndexType_e::ANALYZER && dFilters[i].m_sAttrName!="@rowid" )
{
dToDelete.push_back ( dFilterMap[i] );
dFilterMap[i] = -1;
}
}
for ( int i = (int)dToDelete.size()-1; i>=0; i-- )
dColumnarFilters.erase ( dColumnarFilters.begin() + dToDelete[i] );
if ( dColumnarFilters.empty() || ( dColumnarFilters.size()==1 && dColumnarFilters[0].m_sName=="@rowid" ) )
return {};
std::vector<int> dDeletedFilters;
std::vector<common::BlockIterator_i *> dIterators;
dIterators = m_pColumnar->CreateAnalyzerOrPrefilter ( dColumnarFilters, dDeletedFilters, *pFilter );
if ( dIterators.empty() )
return {};
for ( size_t i = 0; i < dFilterMap.size(); i++ )
if ( dFilterMap[i]!=-1 && std::binary_search ( dDeletedFilters.begin(), dDeletedFilters.end(), dFilterMap[i] ) )
dSIInfo[i].m_bCreated = true;
RowIdBoundaries_t tBoundaries;
const CSphFilterSettings * pRowIdFilter = GetRowIdFilter ( dFilters, RowID_t(m_iDocinfo), tBoundaries );
RowIteratorsWithEstimates_t dResult;
for ( auto & i : dIterators )
dResult.Add ( { CreateIteratorWrapper ( i, pRowIdFilter ? &tBoundaries : nullptr ), 0 } );
for ( int i = 0; i < (int)dFilterMap.size(); i++ )
{
int iColumnarFilter = dFilterMap[i];
if ( iColumnarFilter<0 )
continue;
auto tFound = std::find ( dDeletedFilters.begin(), dDeletedFilters.end(), iColumnarFilter );
if ( tFound == dDeletedFilters.end() )
continue;
int iFound = tFound - dDeletedFilters.begin();
dResult[iFound].second = dSIInfo[i].m_iRsetEstimate;
}
return dResult;
}
static void RecreateFilters ( const CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphVector<CSphFilterSettings> & dFilters, CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, CSphQueryResultMeta & tMeta, CSphVector<CSphFilterSettings> & dModifiedFilters )
{
dModifiedFilters.Resize(0);
ARRAY_FOREACH ( i, dSIInfo )
{
bool bRemovedOptional = dFilters[i].m_bOptional && dSIInfo[i].m_eType==SecondaryIndexType_e::NONE;
if ( !dSIInfo[i].m_bCreated && !bRemovedOptional )
dModifiedFilters.Add ( dFilters[i] );
}
tCtx.m_pFilter.reset();
tFlx.m_pFilters = &dModifiedFilters;
tCtx.CreateFilters ( tFlx, tMeta.m_sError, tMeta.m_sWarning );
}
bool CSphIndex_VLN::SelectIteratorsFT ( const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, const ISphSchema & tSorterSchema, ISphRanker * pRanker, CSphVector<SecondaryIndexInfo_t> & dSIInfo, int iCutoff, int iThreads, StrVec_t & dWarnings ) const
{
// in fulltext case we do the following:
// 1. calculate cost of FT search and number of docs after FT search
// 2. calculate cost of filters over the number of docs after FT search
// 3. calculate the best cost of filters/scan/SI over the whole index
// 4. estimate the cost of intersecting FT and iterator results
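// e.g. (hypothetical numbers) if FT is expected to match ~1% of the docs, the filter
// costs from step 2 are scaled down by that ratio before being compared against the
// cost of intersecting FT with secondary-index iterators from step 4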
NodeEstimate_t tEstimate = pRanker->Estimate ( m_iDocinfo );
// always do single-thread estimates here
SelectIteratorCtx_t tSelectIteratorCtx ( tQuery, dFilters, m_tSchema, tSorterSchema, m_pHistograms, m_pColumnar.get(), m_tSI, iCutoff, m_iDocinfo, 1 );
tSelectIteratorCtx.IgnorePushCost();
float fBestCost = FLT_MAX;
dSIInfo = SelectIterators ( tSelectIteratorCtx, fBestCost, dWarnings );
// check that we have anything beyond plain filters; if not, bail out
if ( !dSIInfo.any_of ( []( const auto & tInfo ){ return tInfo.m_eType==SecondaryIndexType_e::LOOKUP || tInfo.m_eType==SecondaryIndexType_e::INDEX || tInfo.m_eType==SecondaryIndexType_e::ANALYZER; } ) )
return false;
CSphVector<SecondaryIndexInfo_t> dSIInfoFilters { dSIInfo.GetLength() };
float fValuesAfterFilters = 1.0f;
ARRAY_FOREACH ( i, dSIInfo )
if ( dFilters[i].m_sAttrName != "@rowid" )
{
dSIInfoFilters[i] = dSIInfo[i];
fValuesAfterFilters *= float(dSIInfo[i].m_iRsetEstimate) / m_iDocinfo;
dSIInfoFilters[i].m_eType = SecondaryIndexType_e::FILTER;
}
// correct rset estimates (we are estimating filters after FT)
float fCostOfFilters = 0.0f;
if ( tEstimate.m_iDocs>0 )
{
// fixme! update code to use SelectIteratorCtx_t::m_fDocsLeft
float fRatio = float ( tEstimate.m_iDocs ) / tSelectIteratorCtx.m_iTotalDocs;
for ( auto & i : dSIInfoFilters )
i.m_iRsetEstimate *= fRatio;
tSelectIteratorCtx.m_iTotalDocs = tEstimate.m_iDocs;
tSelectIteratorCtx.m_bFromIterator = true;
std::unique_ptr<CostEstimate_i> pCostEstimate ( CreateCostEstimate ( dSIInfoFilters, tSelectIteratorCtx, tSelectIteratorCtx.m_iCutoff ) );
fCostOfFilters = pCostEstimate->CalcQueryCost();
}
int64_t iDocsAfterFilters = int64_t(fValuesAfterFilters*m_iDocinfo);
NodeEstimate_t tIteratorEst = { fBestCost, iDocsAfterFilters, 1 };
const int ITERATOR_BLOCK_SIZE = 1024;
tEstimate.m_iDocs = Max ( tEstimate.m_iDocs, 1 );
float fIteratorWithFT = CalcFTIntersectCost ( tIteratorEst, tEstimate, m_iDocinfo, ITERATOR_BLOCK_SIZE, MAX_BLOCK_DOCS );
float fFTWithFilters = tEstimate.m_fCost + fCostOfFilters;
fIteratorWithFT = EstimateMTCostSIFT ( fIteratorWithFT, iThreads );
fFTWithFilters = EstimateMTCost ( fFTWithFilters, iThreads );
if ( fIteratorWithFT<fFTWithFilters )
{
return true;
} else
{
// if there are any forced indexes, we should take the iterator path even if FT is estimated to be faster
return dSIInfo.any_of ( []( const auto & tInfo ){ return tInfo.m_eForce!=SecondaryIndexType_e::NONE; } );
}
}
static int CalcRemovedOptionalFilters ( const CSphVector<CSphFilterSettings> & dFilters, const CSphVector<SecondaryIndexInfo_t> & dSIInfo )
{
int iRemovedOptional = 0;
ARRAY_FOREACH ( i, dFilters )
if ( dFilters[i].m_bOptional && dSIInfo[i].m_eType==SecondaryIndexType_e::NONE )
iRemovedOptional++;
return iRemovedOptional;
}
static void RemoveOptionalFilters ( const CSphVector<CSphFilterSettings> & dFilters, CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, CSphQueryResultMeta & tMeta, CSphVector<CSphFilterSettings> & dModifiedFilters )
{
int iNumOptional = 0;
dModifiedFilters.Resize(0);
for ( auto & i : dFilters )
if ( i.m_bOptional )
iNumOptional++;
else
dModifiedFilters.Add(i);
if ( iNumOptional )
{
tCtx.m_pFilter.reset();
tFlx.m_pFilters = &dModifiedFilters;
tCtx.CreateFilters ( tFlx, tMeta.m_sError, tMeta.m_sWarning );
}
}
bool CSphIndex_VLN::ChooseIterators ( CSphVector<SecondaryIndexInfo_t> & dSIInfo, const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, const ISphSchema & tMaxSorterSchema, CSphQueryResultMeta & tMeta, int iCutoff, int iThreads, CSphVector<CSphFilterSettings> & dModifiedFilters, ISphRanker * pRanker ) const
{
StrVec_t dWarnings;
bool bKNN = !tQuery.m_sKNNAttr.IsEmpty();
float fBestCost = FLT_MAX;
if ( bKNN )
{
SelectIteratorCtx_t tSelectIteratorCtx ( tQuery, dFilters, m_tSchema, tMaxSorterSchema, m_pHistograms, m_pColumnar.get(), m_tSI, iCutoff, m_iDocinfo, 1 );
tSelectIteratorCtx.m_bFromIterator = true;
int iRequestedKNNDocs = Min ( tQuery.m_iKNNK, m_iDocinfo );
tSelectIteratorCtx.m_fDocsLeft = float(iRequestedKNNDocs)/m_iDocinfo;
dSIInfo = SelectIterators ( tSelectIteratorCtx, fBestCost, dWarnings );
}
else
{
if ( !pRanker )
{
// In order to maintain some consistency with GetPseudoShardingMetric() we need to do one of the following:
// a. Run this with the number of docs in this pseudo_chunk and one thread
// b. Run this with the same number of docs and number of threads as in GetPseudoShardingMetric()
// For now we use approach b) as it is simpler
SelectIteratorCtx_t tSelectIteratorCtx ( tQuery, dFilters, m_tSchema, tMaxSorterSchema, m_pHistograms, m_pColumnar.get(), m_tSI, iCutoff, m_iDocinfo, iThreads );
dSIInfo = SelectIterators ( tSelectIteratorCtx, fBestCost, dWarnings );
}
else
{
bool bRes = SelectIteratorsFT ( tQuery, dFilters, tMaxSorterSchema, pRanker, dSIInfo, iCutoff, iThreads, dWarnings );
if ( !bRes )
{
// if we did not spawn any iterators, we need to remove optional filters (as they assume they will be replaced by iterators)
RemoveOptionalFilters ( dFilters, tCtx, tFlx, tMeta, dModifiedFilters );
return false;
}
}
}
if ( dWarnings.GetLength() )
tMeta.m_sWarning = ConcatWarnings(dWarnings);
return true;
}
std::pair<RowidIterator_i *, bool> CSphIndex_VLN::SpawnIterators ( const CSphQuery & tQuery, const CSphVector<CSphFilterSettings> & dFilters, CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, const ISphSchema & tMaxSorterSchema, CSphQueryResultMeta & tMeta, int iCutoff, int iThreads, CSphVector<CSphFilterSettings> & dModifiedFilters, ISphRanker * pRanker ) const
{
if ( !dFilters.GetLength() )
{
if ( !tQuery.m_sKNNAttr.IsEmpty() )
return CreateKNNIterator ( m_pKNN.get(), tQuery, m_tSchema, tMaxSorterSchema, tMeta.m_sError );
return { nullptr, false };
}
// the g_iPseudoShardingThresh>0 check ensures iterators are still spawned in the test suite (which sets g_iPseudoShardingThresh=0)
const int64_t SMALL_INDEX_THRESH = 8192;
if ( m_iDocinfo < SMALL_INDEX_THRESH && g_iPseudoShardingThresh > 0 )
{
dModifiedFilters = dFilters;
return { nullptr, false };
}
CSphVector<SecondaryIndexInfo_t> dSIInfo;
if ( !ChooseIterators ( dSIInfo, tQuery, dFilters, tCtx, tFlx, tMaxSorterSchema, tMeta, iCutoff, iThreads, dModifiedFilters, pRanker ) )
return { nullptr, false };
RowIteratorsWithEstimates_t dSIIterators, dLookupIterators, dAnalyzerIterators, dKNNIterators;
int iRemovedOptional = CalcRemovedOptionalFilters ( dFilters, dSIInfo );
// knn iterators
bool bError = false;
dKNNIterators = CreateKNNIterators ( m_pKNN.get(), tQuery, m_tSchema, tMaxSorterSchema, bError, tMeta.m_sError );
if ( bError )
return { nullptr, true };
// secondary index iterators
dSIIterators = m_tSI.CreateSecondaryIndexIterator ( dSIInfo, dFilters, tQuery.m_eCollation, tMaxSorterSchema, RowID_t(m_iDocinfo), iCutoff );
// lookup-by-id (.SPT) iterators
dLookupIterators = CreateLookupIterator ( dSIInfo, dFilters, m_tDocidLookup.GetReadPtr(), RowID_t(m_iDocinfo) );
// try to spawn analyzers or prefilters from columnar storage
// if we already created an iterator at the previous stage, we need to recreate the filters here,
// so we won't be doing unnecessary minmax eval over filters that were replaced by iterators
int iCreated = 0;
dSIInfo.for_each ( [&]( const SecondaryIndexInfo_t & tInfo ){ if ( tInfo.m_bCreated ) iCreated++; } );
if ( ( iCreated && m_pColumnar ) || iRemovedOptional )
RecreateFilters ( dSIInfo, dFilters, tCtx, tFlx, tMeta, dModifiedFilters );
dAnalyzerIterators = CreateColumnarAnalyzerOrPrefilter ( dSIInfo, dFilters, tQuery.m_dFilterTree, tCtx.m_pFilter.get(), tQuery.m_eCollation, tMaxSorterSchema, tMeta.m_sWarning );
int iCreatedAfterColumnar = 0;
dSIInfo.for_each ( [&]( const SecondaryIndexInfo_t & tInfo ){ if ( tInfo.m_bCreated ) iCreatedAfterColumnar++; } );
// if we created an analyzer, we need to recreate filters
if ( ( !m_pColumnar && iCreated>0 ) || iCreatedAfterColumnar!=iCreated )
RecreateFilters ( dSIInfo, dFilters, tCtx, tFlx, tMeta, dModifiedFilters );
RowIteratorsWithEstimates_t dAllIterators;
for ( auto i : dKNNIterators )
dAllIterators.Add(i);
for ( auto i : dSIIterators )
dAllIterators.Add(i);
for ( auto i : dLookupIterators )
dAllIterators.Add(i);
for ( auto i : dAnalyzerIterators )
dAllIterators.Add(i);
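// sort ascending by result-set estimate (pair::second) so that the most selective
// iterator comes first for the intersection created below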
dAllIterators.Sort ( ::bind ( &std::pair<RowidIterator_i *,int64_t>::second ) );
CSphVector<RowidIterator_i *> dFinalIterators;
for ( auto i : dAllIterators )
dFinalIterators.Add ( i.first );
switch ( dFinalIterators.GetLength() )
{
case 0:
RemoveOptionalFilters ( dFilters, tCtx, tFlx, tMeta, dModifiedFilters );
return { nullptr, false };
case 1:
return { dFinalIterators[0], false };
default:
// both columnar iterator wrappers and secondary index iterators support rowid filtering, so no need for it here
return { CreateIteratorIntersect ( dFinalIterators, nullptr ), false };
}
}
static bool AreAllFiltersColumnar ( const CSphVector<CSphFilterSettings> & dFilters, const ISphSchema & tSchema )
{
return dFilters.all_of ( [&tSchema]( const CSphFilterSettings & tFilter )
{
if ( tFilter.m_sAttrName=="@rowid" )
return true;
const CSphColumnInfo * pCol = tSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
return pCol ? ( pCol->IsColumnar() || pCol->IsColumnarExpr() ) : false;
} );
}
static bool AreAllFiltersExpressions ( const CSphVector<CSphFilterSettings> & dFilters, const ISphSchema & tSchema )
{
return dFilters.all_of ( [&tSchema]( const CSphFilterSettings & tFilter )
{
if ( tFilter.m_sAttrName=="@rowid" )
return true;
const CSphColumnInfo * pCol = tSchema.GetAttr ( tFilter.m_sAttrName.cstr() );
return pCol && pCol->m_pExpr;
} );
}
bool CSphIndex_VLN::SetupFiltersAndContext ( CSphQueryContext & tCtx, CreateFilterContext_t & tFlx, CSphQueryResultMeta & tMeta, const ISphSchema * & pMaxSorterSchema, CSphVector<CSphFilterSettings> & dTransformedFilters, CSphVector<FilterTreeItem_t> & dTransformedFilterTree, std::unique_ptr<ISphSchema> & pModifiedMatchSchema, const VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs ) const
{
// select the sorter with max schema
int iMaxSchemaIndex = GetMaxSchemaIndexAndMatchCapacity ( dSorters ).first;
pMaxSorterSchema = dSorters[iMaxSchemaIndex]->GetSchema();
auto dSorterSchemas = SorterSchemas ( dSorters, iMaxSchemaIndex);
auto & tQuery = tCtx.m_tQuery;
// setup filters
tFlx.m_pFilters = &tQuery.m_dFilters;
tFlx.m_pFilterTree = &tQuery.m_dFilterTree;
tFlx.m_pMatchSchema = pMaxSorterSchema;
tFlx.m_pIndexSchema = &m_tSchema;
tFlx.m_pBlobPool = m_tBlobAttrs.GetReadPtr();
tFlx.m_pColumnar = m_pColumnar.get();
tFlx.m_eCollation = tQuery.m_eCollation;
tFlx.m_bScan = tQuery.m_sQuery.IsEmpty();
tFlx.m_pHistograms = m_pHistograms;
tFlx.m_pSI = &m_tSI;
tFlx.m_iTotalDocs = m_iDocinfo;
tFlx.m_sJoinIdx = tQuery.m_sJoinIdx;
// may modify eval stages in schema; needs to be before SetupCalc
if ( !TransformFilters ( tFlx, dTransformedFilters, dTransformedFilterTree, pModifiedMatchSchema, tQuery.m_dItems, tMeta.m_sError ) )
return false;
if ( pModifiedMatchSchema )
tFlx.m_pMatchSchema = pModifiedMatchSchema.get();
tFlx.m_pFilters = &dTransformedFilters;
tFlx.m_pFilterTree = dTransformedFilterTree.GetLength() ? &dTransformedFilterTree : nullptr;
// setup calculations and result schema
if ( !tCtx.SetupCalc ( tMeta, *tFlx.m_pMatchSchema, m_tSchema, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), dSorterSchemas ) )
return false;
// set blob pool for string on_sort expression fix up
tCtx.SetBlobPool ( m_tBlobAttrs.GetReadPtr() );
tCtx.SetColumnar ( m_pColumnar.get() );
tCtx.m_pProfile = tMeta.m_pProfile;
tCtx.m_pLocalDocs = tArgs.m_pLocalDocs;
tCtx.m_iTotalDocs = ( tArgs.m_iTotalDocs ? tArgs.m_iTotalDocs : m_tStats.m_iTotalDocuments );
tCtx.m_iIndexTotalDocs = m_iDocinfo;
return tCtx.CreateFilters ( tFlx, tMeta.m_sError, tMeta.m_sWarning );
}
bool CSphIndex_VLN::MultiScan ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs, int64_t tmMaxTimer ) const
{
assert ( tArgs.m_iTag>=0 );
auto& tMeta = *tResult.m_pMeta;
// check if index is ready
if ( !m_bPassedAlloc )
{
tMeta.m_sError = "table not preread";
return false;
}
// check if index supports scans
if ( !m_tSchema.GetAttrsCount() )
{
tMeta.m_sError = "need attributes to run fullscan";
return false;
}
// we count documents only (before filters)
if ( tQuery.m_iMaxPredictedMsec )
tMeta.m_bHasPrediction = true;
if ( tArgs.m_uPackedFactorFlags & SPH_FACTOR_ENABLE )
tMeta.m_sWarning.SetSprintf ( "packedfactors() will not work with a fullscan; you need to specify a query" );
// check if index has data
if ( m_bIsEmpty || m_iDocinfo<=0 )
{
PooledAttrsToPtrAttrs ( dSorters, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), tArgs.m_bFinalizeSorters, tMeta.m_pProfile, tArgs.m_bModifySorterSchemas );
return true;
}
// start counting
int64_t tmQueryStart = sphMicroTimer();
int64_t tmCpuQueryStart = sphTaskCpuTimer();
std::optional<ScopedLowPriority_c> tPrio;
if ( tQuery.m_bLowPriority )
tPrio.emplace();
CSphQueryContext tCtx(tQuery);
CreateFilterContext_t tFlx;
const ISphSchema * pMaxSorterSchema = nullptr;
CSphVector<CSphFilterSettings> dTransformedFilters; // holds filter settings if they were modified. filters hold pointers to those settings
CSphVector<FilterTreeItem_t> dTransformedFilterTree;
std::unique_ptr<ISphSchema> pModifiedMatchSchema; // may contain same schema but with modified eval stages
if ( !SetupFiltersAndContext ( tCtx, tFlx, tMeta, pMaxSorterSchema, dTransformedFilters, dTransformedFilterTree, pModifiedMatchSchema, dSorters, tArgs ) )
return false;
assert(pMaxSorterSchema);
const ISphSchema & tMaxSorterSchema = *pMaxSorterSchema;
if ( CheckEarlyReject ( dTransformedFilters, tCtx.m_pFilter.get(), tQuery.m_eCollation, tMaxSorterSchema ) )
{
PooledAttrsToPtrAttrs ( dSorters, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), tArgs.m_bFinalizeSorters, tMeta.m_pProfile, tArgs.m_bModifySorterSchemas );
tMeta.m_iQueryTime += (int)( ( sphMicroTimer()-tmQueryStart )/1000 );
tMeta.m_iCpuTime += sphTaskCpuTimer ()-tmCpuQueryStart;
return true;
}
// setup sorters
for ( auto & i : dSorters )
{
i->SetBlobPool ( m_tBlobAttrs.GetReadPtr() );
i->SetColumnar ( m_pColumnar.get() );
}
// prepare to process the rows
bool bRandomize = dSorters[0]->IsRandom();
CSphMatch tMatch;
tMatch.Reset ( tMaxSorterSchema.GetDynamicSize() );
tMatch.m_iWeight = tArgs.m_iIndexWeight;
// fixme! the tag is also used as a bitmask with | 0x80000000,
// which marks that a match comes from a remote.
// using -1 might also be interpreted as 0xFFFFFFFF in such a context!
// Is that intended?
tMatch.m_iTag = tCtx.m_dCalcFinal.GetLength() ? -1 : tArgs.m_iTag;
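// note: -1 marks matches that still need the final-calc pass; SphFinalMatchCalc_t
// (above) skips matches with m_iTag>=0 and stamps the real tag once the final
// expressions have been computed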
auto & tSess = session::Info();
tSess.m_pSessionOpaque1 = (void*)(const DocstoreReader_i*)this;
tSess.m_pSessionOpaque2 = (void*)m_pDocstore.get();
SwitchProfile ( tMeta.m_pProfile, SPH_QSTATE_SETUP_ITER );
int iCutoff = ApplyImplicitCutoff ( tQuery, dSorters, false );
bool bAllPrecalc = dSorters.GetLength() && dSorters.all_of ( []( auto pSorter ){ return pSorter->IsPrecalc(); } );
int iOldLen = tMeta.m_tIteratorStats.m_dIterators.GetLength();
for ( auto & i : dSorters )
i->AddDesc ( tMeta.m_tIteratorStats.m_dIterators );
if ( tMeta.m_tIteratorStats.m_dIterators.GetLength()!=iOldLen )
tMeta.m_tIteratorStats.m_iTotal = 1;
// try to spawn an iterator from a secondary index
CSphVector<CSphFilterSettings> dFiltersAfterIterator; // holds filter settings if they were modified. filters hold pointers to those settings
std::unique_ptr<RowidIterator_i> pIterator;
if ( bAllPrecalc )
tCtx.m_pFilter.reset();
else
{
auto tSpawned = SpawnIterators ( tQuery, dTransformedFilters, tCtx, tFlx, tMaxSorterSchema, tMeta, iCutoff, tArgs.m_iTotalThreads, dFiltersAfterIterator, nullptr );
pIterator = std::unique_ptr<RowidIterator_i> ( tSpawned.first );
if ( tSpawned.second )
return false;
}
SwitchProfile ( tMeta.m_pProfile, SPH_QSTATE_FULLSCAN );
bool bCutoffHit = false;
if ( pIterator )
{
if ( iCutoff>=0 && !tCtx.m_pFilter )
pIterator->SetCutoff(iCutoff);
bCutoffHit = RunFullscanOnIterator ( pIterator.get(), tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, tArgs.m_iIndexWeight, tmMaxTimer );
pIterator->AddDesc ( tMeta.m_tIteratorStats.m_dIterators );
tMeta.m_tIteratorStats.m_iTotal = 1;
}
else
{
RowIdBoundaries_t tBoundaries;
const CSphFilterSettings * pRowIdFilter = GetRowIdFilter ( dFiltersAfterIterator, (RowID_t)m_iDocinfo, tBoundaries );
if ( !pRowIdFilter )
tBoundaries.m_tMaxRowID = RowID_t(m_iDocinfo)-1;
bool bAllFiltersColumnar = AreAllFiltersColumnar ( dFiltersAfterIterator, m_tSchema );
bool bOnlyExprFilters = AreAllFiltersExpressions ( dFiltersAfterIterator, tMaxSorterSchema );
bool bAllAttrsColumnar = !m_iDocinfoIndex;
// use block filtering only when we have an attribute with a block index
if ( bAllFiltersColumnar || bAllAttrsColumnar || bOnlyExprFilters )
bCutoffHit = RunFullscanOnAttrs ( tBoundaries, tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, tArgs.m_iIndexWeight, tmMaxTimer );
else
{
if ( pRowIdFilter )
bCutoffHit = ScanByBlocks<true> ( tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, tArgs.m_iIndexWeight, tmMaxTimer, &tBoundaries );
else
bCutoffHit = ScanByBlocks<false> ( tCtx, tMeta, dSorters, tMatch, iCutoff, bRandomize, tArgs.m_iIndexWeight, tmMaxTimer );
}
}
tMeta.m_bTotalMatchesApprox = bCutoffHit && !bAllPrecalc;
SwitchProfile ( tMeta.m_pProfile, SPH_QSTATE_FINALIZE );
if ( dSorters.any_of ( [&] ( ISphMatchSorter * p ) { return !p->FinalizeJoin ( tMeta.m_sError, tMeta.m_sWarning ); } ) )
return false;
// do final expression calculations
if ( tCtx.m_dCalcFinal.GetLength() )
{
DocstoreSession_c tSession;
int64_t iSessionUID = tSession.GetUID();
// spawn buffered readers for the current session
// put them to a global hash
if ( m_pDocstore )
m_pDocstore->CreateReader ( iSessionUID );
DocstoreSession_c::InfoRowID_t tSessionInfo;
tSessionInfo.m_pDocstore = m_pDocstore.get();
tSessionInfo.m_iSessionId = iSessionUID;
for ( auto & i : tCtx.m_dCalcFinal )
{
assert ( i.m_pExpr );
if ( m_pDocstore )
i.m_pExpr->Command ( SPH_EXPR_SET_DOCSTORE_ROWID, &tSessionInfo );
}
SphFinalMatchCalc_t tFinal ( tArgs.m_iTag, tCtx );
dSorters.Apply ( [&] ( ISphMatchSorter * p ) { p->Finalize ( tFinal, false, false ); } );
}
PooledAttrsToPtrAttrs ( dSorters, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), tArgs.m_bFinalizeSorters, tMeta.m_pProfile, tArgs.m_bModifySorterSchemas );
// done
tResult.m_pBlobPool = m_tBlobAttrs.GetReadPtr();
tResult.m_pDocstore = m_pDocstore ? this : nullptr;
tResult.m_pColumnar = m_pColumnar.get();
tMeta.m_iQueryTime += (int)( ( sphMicroTimer()-tmQueryStart )/1000 );
tMeta.m_iCpuTime += sphTaskCpuTimer ()-tmCpuQueryStart;
return true;
}
//////////////////////////////////////////////////////////////////////////////
ISphQword * DiskIndexQwordSetup_c::QwordSpawn ( const XQKeyword_t & tWord ) const
{
if ( !tWord.m_pPayload )
{
WITH_QWORD ( m_pIndex, false, Qword, return new Qword ( tWord.m_bExpanded, tWord.m_bExcluded, m_pIndex->GetIndexId() ) );
} else
{
if ( m_pIndex->GetSettings().m_eHitFormat==SPH_HIT_FORMAT_INLINE )
return new DiskPayloadQword_c<true> ( (const DiskSubstringPayload_t *)tWord.m_pPayload, tWord.m_bExcluded, m_pDoclist, m_pHitlist, m_pIndex->GetIndexId() );
else
return new DiskPayloadQword_c<false> ( (const DiskSubstringPayload_t *)tWord.m_pPayload, tWord.m_bExcluded, m_pDoclist, m_pHitlist, m_pIndex->GetIndexId() );
}
return NULL;
}
bool DiskIndexQwordSetup_c::QwordSetup ( ISphQword * pWord ) const
{
auto * pMyWord = (DiskIndexQwordTraits_c*)pWord;
// setup attrs
pMyWord->m_tDoc.Reset ( m_iDynamicRowitems );
pMyWord->m_tDoc.m_tRowID = INVALID_ROWID;
return pMyWord->Setup ( this );
}
bool DiskIndexQwordSetup_c::SetupWithWrd ( const DiskIndexQwordTraits_c& tWord, DictEntry_t& tRes ) const
{
auto* pIndex = (CSphIndex_VLN*)const_cast<CSphIndex*> ( m_pIndex );
const char * sWord = tWord.m_sDictWord.cstr();
int iWordLen = sWord ? (int) strlen ( sWord ) : 0;
if ( tWord.m_sWord.Ends("*") )
{
iWordLen = Max ( iWordLen-1, 0 );
// might match either infix or prefix
int iMinLen = Max ( pIndex->m_tSettings.GetMinPrefixLen ( true ), pIndex->m_tSettings.m_iMinInfixLen );
if ( pIndex->m_tSettings.GetMinPrefixLen ( true ) )
iMinLen = Min ( iMinLen, pIndex->m_tSettings.GetMinPrefixLen ( true ) );
if ( pIndex->m_tSettings.m_iMinInfixLen )
iMinLen = Min ( iMinLen, pIndex->m_tSettings.m_iMinInfixLen );
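// net effect: iMinLen is the smallest non-zero of min_prefix_len/min_infix_len,
// e.g. min_prefix_len=3 and min_infix_len=2 give iMinLen=2,
// while min_prefix_len=3 with infixes disabled gives iMinLen=3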
// bail out if the term is shorter than the allowed prefix or infix
if ( iWordLen<iMinLen )
return false;
}
// leading special symbols trimming
if ( tWord.m_sDictWord.Begins("*") )
{
++sWord;
iWordLen = Max ( iWordLen-1, 0 );
// bail out if the term is shorter than the allowed infix
if ( iWordLen<pIndex->m_tSettings.m_iMinInfixLen )
return false;
}
const CSphWordlistCheckpoint * pCheckpoint = pIndex->m_tWordlist.FindCheckpointWrd ( sWord, iWordLen, false );
if ( !pCheckpoint )
return false;
// decode wordlist chunk
const BYTE * pBuf = pIndex->m_tWordlist.AcquireDict ( pCheckpoint );
assert ( pBuf );
assert ( m_iSkiplistBlockSize>0 );
KeywordsBlockReader_c tCtx ( pBuf, m_iSkiplistBlockSize );
while ( tCtx.UnpackWord() )
{
// block is sorted
// so once keywords are greater than the reference word, no more matches
assert ( tCtx.GetWordLen()>0 );
int iCmp = sphDictCmpStrictly ( sWord, iWordLen, tCtx.GetWord(), tCtx.GetWordLen() );
if ( iCmp<0 )
return false;
if ( iCmp==0 )
break;
}
if ( tCtx.GetWordLen()<=0 )
return false;
tRes = tCtx;
return true;
}
bool DiskIndexQwordSetup_c::SetupWithCrc ( const DiskIndexQwordTraits_c& tWord, DictEntry_t& tRes ) const
{
auto * pIndex = (CSphIndex_VLN *)const_cast<CSphIndex *>(m_pIndex);
const CSphWordlistCheckpoint * pCheckpoint = pIndex->m_tWordlist.FindCheckpointCrc ( tWord.m_uWordID );
if ( !pCheckpoint )
return false;
const BYTE * pBuf = pIndex->m_tWordlist.AcquireDict ( pCheckpoint );
assert ( pBuf );
assert ( m_iSkiplistBlockSize>0 );
return pIndex->m_tWordlist.GetWord ( pBuf, tWord.m_uWordID, tRes );
}
bool DiskIndexQwordSetup_c::Setup ( ISphQword * pWord ) const
{
// there was a dynamic_cast here once, but it's not necessary
// maybe it's worth rewriting the class hierarchy to avoid the C-cast here?
DiskIndexQwordTraits_c & tWord = *(DiskIndexQwordTraits_c*)pWord;
// setup stats
tWord.m_iDocs = 0;
tWord.m_iHits = 0;
auto * pIndex = (CSphIndex_VLN *)const_cast<CSphIndex *>(m_pIndex);
// !COMMIT FIXME!
// the stuff below really belongs in the wordlist,
// which in turn really belongs in the dictreader,
// which in turn might or might not be a part of the dict
// binary search through the checkpoints for the one whose range matches the word ID
assert ( pIndex->m_bPassedAlloc );
assert ( !pIndex->m_tWordlist.m_tBuf.IsEmpty() );
// empty index?
if ( !pIndex->m_tWordlist.m_dCheckpoints.GetLength() )
return false;
DictEntry_t tRes;
if ( pIndex->m_pDict->GetSettings().m_bWordDict )
{
if ( !SetupWithWrd ( tWord, tRes ) )
return false;
} else if ( !SetupWithCrc ( tWord, tRes ) )
return false;
const ESphHitless eMode = pIndex->m_tSettings.m_eHitless;
tWord.m_iDocs = eMode==SPH_HITLESS_SOME ? ( tRes.m_iDocs & HITLESS_DOC_MASK ) : tRes.m_iDocs;
tWord.m_iHits = tRes.m_iHits;
tWord.m_bHasHitlist =
( eMode==SPH_HITLESS_NONE ) ||
( eMode==SPH_HITLESS_SOME && !( tRes.m_iDocs & HITLESS_DOC_FLAG ) );
if ( m_bSetupReaders )
{
tWord.SetDocReader ( m_pDoclist );
// read in skiplist
// OPTIMIZE? maybe add an option to decompress on preload instead?
if ( m_pSkips && tRes.m_iDocs>m_iSkiplistBlockSize )
{
int iSkips = tRes.m_iDocs/m_iSkiplistBlockSize;
const int SMALL_SKIP_THRESH = 256;
bool bNeedCache = iSkips > SMALL_SKIP_THRESH;
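// e.g. caching kicks in once the skiplist has more than 256 entries, i.e.
// (assuming a typical block size of 128) for words with roughly 33K+ documents;
// smaller skiplists are cheap enough to re-read on every query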
bool & bFromCache = tWord.m_bSkipFromCache;
bFromCache = bNeedCache && SkipCache::Find ( { m_pIndex->GetIndexId(), tWord.m_uWordID }, tWord.m_pSkipData );
if ( !bFromCache )
{
tWord.m_pSkipData = new SkipData_t;
tWord.m_pSkipData->Read ( m_pSkips, tRes, tWord.m_iDocs, m_iSkiplistBlockSize );
bFromCache = bNeedCache && SkipCache::Add ( { m_pIndex->GetIndexId(), tWord.m_uWordID }, tWord.m_pSkipData );
}
}
tWord.m_rdDoclist->SeekTo ( tRes.m_iDoclistOffset, tRes.m_iDoclistHint );
tWord.SetHitReader ( m_pHitlist );
}
return true;
}
QwordScan_c::QwordScan_c ( int iRowsCount )
: m_iRowsCount ( iRowsCount )
{
m_bDone = ( m_iRowsCount==0 );
m_iDocs = m_iRowsCount;
m_sWord = "";
m_sDictWord = "";
m_bExcluded = true;
m_dQwordFields.SetAll();
}
const CSphMatch & QwordScan_c::GetNextDoc()
{
if ( m_bDone )
{
m_tDoc.m_tRowID = INVALID_ROWID;
return m_tDoc;
}
// in an RT index some rows could be killed in any segment
while ( true )
{
if ( m_tDoc.m_tRowID==INVALID_ROWID )
m_tDoc.m_tRowID = 0;
else
m_tDoc.m_tRowID++;
if ( m_tDoc.m_tRowID>=m_iRowsCount )
{
m_bDone = true;
m_tDoc.m_tRowID = INVALID_ROWID;
break;
}
if ( IsAliveRow ( m_tDoc.m_tRowID ) )
break;
}
return m_tDoc;
}
ISphQword * DiskIndexQwordSetup_c::ScanSpawn() const
{
return new QwordScan_c ( m_iRowsCount );
}
//////////////////////////////////////////////////////////////////////////////
bool CSphIndex_VLN::Lock ()
{
CSphString sName = GetFilename ( SPH_EXT_SPL );
sphLogDebug ( "Locking the table via file %s", sName.cstr() );
return RawFileLock ( sName, m_iLockFD, m_sLastError );
}
void CSphIndex_VLN::Unlock()
{
CSphString sName = GetFilename ( SPH_EXT_SPL );
if ( m_iLockFD<0 )
return;
sphLogDebug ( "Unlocking the table (lock %s)", sName.cstr() );
RawFileUnLock ( sName, m_iLockFD );
}
void CSphIndex_VLN::Dealloc ()
{
if ( !m_bPassedAlloc )
return;
m_pDoclistFile = nullptr;
m_pHitlistFile = nullptr;
m_pColumnar = nullptr;
m_tSI.Reset();
m_tAttr.Reset ();
m_tBlobAttrs.Reset();
m_tSkiplists.Reset ();
m_tWordlist.Reset ();
m_tDeadRowMap.Dealloc();
m_tDocidLookup.Reset();
m_pDocstore.reset();
m_iDocinfo = 0;
m_iMinMaxIndex = 0;
m_bPassedRead = false;
m_bPassedAlloc = false;
m_uAttrsStatus = 0;
QcacheDeleteIndex ( m_iIndexId );
SkipCache::DeleteAll ( m_iIndexId );
m_iIndexId = GetIndexUid();
}
CSphIndex_VLN::LOAD_E CSphIndex_VLN::LoadHeaderLegacy ( const CSphString& sHeaderName, bool bStripPath, CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder, CSphString & sWarning )
{
const int MAX_HEADER_SIZE = 32768;
CSphFixedVector<BYTE> dCacheInfo ( MAX_HEADER_SIZE );
m_sLastError = "";
CSphAutoreader rdInfo ( dCacheInfo.Begin(), MAX_HEADER_SIZE ); // to avoid mallocs
if ( !rdInfo.Open ( sHeaderName, m_sLastError ) )
return LOAD_E::GeneralError_e;
// magic header
const char* sFmt = CheckFmtMagic ( rdInfo.GetDword () );
if ( sFmt )
{
m_sLastError.SetSprintf ( sFmt, sHeaderName.cstr() );
return LOAD_E::ParseError_e;
}
// version
m_uVersion = rdInfo.GetDword();
if ( m_uVersion<=1 || m_uVersion>INDEX_FORMAT_VERSION )
{
m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sHeaderName.cstr(), m_uVersion, INDEX_FORMAT_VERSION );
return LOAD_E::GeneralError_e;
}
// we don't support anything prior to v54
DWORD uMinFormatVer = 54;
if ( m_uVersion<uMinFormatVer )
{
m_sLastError.SetSprintf ( "tables prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, sHeaderName.cstr(), m_uVersion );
return LOAD_E::GeneralError_e;
}
// schema
ReadSchema ( rdInfo, m_tSchema, m_uVersion );
// check schema for dupes
for ( int iAttr=0; iAttr<m_tSchema.GetAttrsCount(); iAttr++ )
{
const CSphColumnInfo & tCol = m_tSchema.GetAttr(iAttr);
for ( int i=0; i<iAttr; i++ )
if ( m_tSchema.GetAttr(i).m_sName==tCol.m_sName )
sWarning.SetSprintf ( "duplicate attribute name: %s", tCol.m_sName.cstr() );
}
// dictionary header (wordlist checkpoints, infix blocks, etc)
m_tWordlist.m_iDictCheckpointsOffset = rdInfo.GetOffset();
m_tWordlist.m_iDictCheckpoints = rdInfo.GetDword();
m_tWordlist.m_iInfixCodepointBytes = rdInfo.GetByte();
m_tWordlist.m_iInfixBlocksOffset = rdInfo.GetDword();
m_tWordlist.m_iInfixBlocksWordsSize = rdInfo.GetDword();
m_tWordlist.m_dCheckpoints.Reset ( m_tWordlist.m_iDictCheckpoints );
// index stats
m_tStats.m_iTotalDocuments = rdInfo.GetDword ();
m_tStats.m_iTotalBytes = rdInfo.GetOffset ();
LoadIndexSettings ( m_tSettings, rdInfo, m_uVersion );
CSphTokenizerSettings tTokSettings;
// tokenizer stuff
if ( !tTokSettings.Load ( pFilenameBuilder, rdInfo, tEmbeddedFiles, m_sLastError ) )
return LOAD_E::GeneralError_e;
if ( bStripPath )
StripPath ( tTokSettings.m_sSynonymsFile );
StrVec_t dWarnings;
TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tTokSettings, &tEmbeddedFiles, pFilenameBuilder, dWarnings, m_sLastError );
if ( !pTokenizer )
return LOAD_E::GeneralError_e;
// dictionary stuff
CSphDictSettings tDictSettings;
tDictSettings.Load ( rdInfo, tEmbeddedFiles, pFilenameBuilder, sWarning );
if ( bStripPath )
{
ARRAY_FOREACH ( i, tDictSettings.m_dWordforms )
StripPath ( tDictSettings.m_dWordforms[i] );
}
DictRefPtr_c pDict { tDictSettings.m_bWordDict
? sphCreateDictionaryKeywords ( tDictSettings, &tEmbeddedFiles, pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError )
: sphCreateDictionaryCRC ( tDictSettings, &tEmbeddedFiles, pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError )};
if ( !pDict )
return LOAD_E::GeneralError_e;
if ( tDictSettings.m_sMorphFingerprint!=pDict->GetMorphDataFingerprint() )
sWarning.SetSprintf ( "different lemmatizer dictionaries (table='%s', current='%s')",
tDictSettings.m_sMorphFingerprint.cstr(),
pDict->GetMorphDataFingerprint().cstr() );
SetDictionary ( pDict );
Tokenizer::AddToMultiformFilterTo ( pTokenizer, pDict->GetMultiWordforms () );
SetTokenizer ( pTokenizer );
SetupQueryTokenizer();
// initialize AOT if needed
m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tDictSettings.m_sMorphology.cstr() );
m_iDocinfo = rdInfo.GetOffset ();
m_iDocinfoIndex = rdInfo.GetOffset ();
m_iMinMaxIndex = rdInfo.GetOffset ();
std::unique_ptr<ISphFieldFilter> pFieldFilter;
CSphFieldFilterSettings tFieldFilterSettings;
tFieldFilterSettings.Load(rdInfo);
if ( tFieldFilterSettings.m_dRegexps.GetLength() )
pFieldFilter = sphCreateRegexpFilter ( tFieldFilterSettings, m_sLastError );
if ( !sphSpawnFilterICU ( pFieldFilter, m_tSettings, tTokSettings, sHeaderName.cstr(), m_sLastError ) )
return LOAD_E::GeneralError_e;
SetFieldFilter ( std::move ( pFieldFilter ) );
if ( m_tSettings.m_bIndexFieldLens )
for ( int i=0; i < m_tSchema.GetFieldsCount(); i++ )
m_dFieldLens[i] = rdInfo.GetOffset(); // FIXME? ideally 64bit even when off is 32bit..
// post-load stuff.. for now, bigrams
CSphIndexSettings & s = m_tSettings;
if ( s.m_eBigramIndex!=SPH_BIGRAM_NONE && s.m_eBigramIndex!=SPH_BIGRAM_ALL )
{
BYTE * pTok;
m_pTokenizer->SetBuffer ( (BYTE*)const_cast<char*> ( s.m_sBigramWords.cstr() ), s.m_sBigramWords.Length() );
while ( ( pTok = m_pTokenizer->GetToken() )!=NULL )
s.m_dBigramWords.Add() = (const char*)pTok;
s.m_dBigramWords.Sort();
}
if ( rdInfo.GetErrorFlag() )
m_sLastError.SetSprintf ( "%s: failed to parse header (unexpected eof)", sHeaderName.cstr() );
return rdInfo.GetErrorFlag() ? LOAD_E::GeneralError_e : LOAD_E::Ok_e;
}
CSphIndex_VLN::LOAD_E CSphIndex_VLN::LoadHeaderJson ( const CSphString& sHeaderName, bool bStripPath, CSphEmbeddedFiles & tEmbeddedFiles, FilenameBuilder_i * pFilenameBuilder, CSphString & sWarning )
{
using namespace bson;
CSphVector<BYTE> dData;
if ( !sphJsonParse ( dData, sHeaderName, m_sLastError ) )
return LOAD_E::ParseError_e;
Bson_c tBson ( dData );
if ( tBson.IsEmpty() || !tBson.IsAssoc() )
{
m_sLastError = "Something wrong read from json header - it is either empty, either not root object.";
return LOAD_E::ParseError_e;
}
// version
m_uVersion = (DWORD)Int ( tBson.ChildByName ( "index_format_version" ) );
if ( m_uVersion<=1 || m_uVersion>INDEX_FORMAT_VERSION )
{
m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sHeaderName.cstr(), m_uVersion, INDEX_FORMAT_VERSION );
return LOAD_E::GeneralError_e;
}
// we don't support anything prior to v54
DWORD uMinFormatVer = 54;
if ( m_uVersion<uMinFormatVer )
{
m_sLastError.SetSprintf ( "tables prior to v.%u are no longer supported (use index_converter tool); %s is v.%u", uMinFormatVer, sHeaderName.cstr(), m_uVersion );
return LOAD_E::GeneralError_e;
}
// index stats
m_tStats.m_iTotalDocuments = Int ( tBson.ChildByName ( "total_documents" ) );
m_tStats.m_iTotalBytes = Int ( tBson.ChildByName ( "total_bytes" ) );
// schema
ReadSchemaJson ( tBson.ChildByName ( "schema" ), m_tSchema );
// check schema for dupes
for ( int iAttr = 0; iAttr < m_tSchema.GetAttrsCount(); ++iAttr )
{
const CSphColumnInfo& tCol = m_tSchema.GetAttr ( iAttr );
for ( int i = 0; i < iAttr; ++i )
if ( m_tSchema.GetAttr ( i ).m_sName == tCol.m_sName )
sWarning.SetSprintf ( "duplicate attribute name: %s", tCol.m_sName.cstr() );
}
// index settings
LoadIndexSettingsJson ( tBson.ChildByName ( "index_settings" ), m_tSettings );
CSphTokenizerSettings tTokSettings;
// tokenizer stuff
if ( !tTokSettings.Load ( pFilenameBuilder, tBson.ChildByName ( "tokenizer_settings" ), tEmbeddedFiles, m_sLastError ) )
return LOAD_E::GeneralError_e;
// dictionary stuff
CSphDictSettings tDictSettings;
tDictSettings.Load ( tBson.ChildByName ( "dictionary_settings" ), tEmbeddedFiles, pFilenameBuilder, sWarning );
// dictionary header (wordlist checkpoints, infix blocks, etc)
m_tWordlist.m_iDictCheckpointsOffset = Int ( tBson.ChildByName ( "dict_checkpoints_offset" ) );
m_tWordlist.m_iDictCheckpoints = (int)Int ( tBson.ChildByName ( "dict_checkpoints" ) );
m_tWordlist.m_iInfixCodepointBytes = (int)Int ( tBson.ChildByName ( "infix_codepoint_bytes" ) );
m_tWordlist.m_iInfixBlocksOffset = Int ( tBson.ChildByName ( "infix_blocks_offset" ) );
m_tWordlist.m_iInfixBlocksWordsSize = (int)Int ( tBson.ChildByName ( "infix_block_words_size" ) );
m_tWordlist.m_dCheckpoints.Reset ( m_tWordlist.m_iDictCheckpoints );
if ( bStripPath )
{
StripPath ( tTokSettings.m_sSynonymsFile );
for ( auto& i : tDictSettings.m_dWordforms )
StripPath ( i );
}
StrVec_t dWarnings;
TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tTokSettings, &tEmbeddedFiles, pFilenameBuilder, dWarnings, m_sLastError );
if ( !pTokenizer )
return LOAD_E::GeneralError_e;
DictRefPtr_c pDict { tDictSettings.m_bWordDict
? sphCreateDictionaryKeywords ( tDictSettings, &tEmbeddedFiles, pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError )
: sphCreateDictionaryCRC ( tDictSettings, &tEmbeddedFiles, pTokenizer, GetName(), bStripPath, m_tSettings.m_iSkiplistBlockSize, pFilenameBuilder, m_sLastError )};
if ( !pDict )
return LOAD_E::GeneralError_e;
if ( tDictSettings.m_sMorphFingerprint!=pDict->GetMorphDataFingerprint() )
sWarning.SetSprintf ( "different lemmatizer dictionaries (table='%s', current='%s')",
tDictSettings.m_sMorphFingerprint.cstr(),
pDict->GetMorphDataFingerprint().cstr() );
SetDictionary ( pDict );
Tokenizer::AddToMultiformFilterTo ( pTokenizer, pDict->GetMultiWordforms () );
SetTokenizer ( pTokenizer );
SetupQueryTokenizer();
// initialize AOT if needed
m_tSettings.m_uAotFilterMask = sphParseMorphAot ( tDictSettings.m_sMorphology.cstr() );
m_iDocinfo = Int ( tBson.ChildByName ( "docinfo" ) );
m_iDocinfoIndex = Int ( tBson.ChildByName ( "docinfo_index" ) );
m_iMinMaxIndex = Int ( tBson.ChildByName ( "min_max_index" ) );
std::unique_ptr<ISphFieldFilter> pFieldFilter;
auto tFieldFilterSettingsNode = tBson.ChildByName ( "field_filter_settings" );
if ( !IsNullNode(tFieldFilterSettingsNode) )
{
CSphFieldFilterSettings tFieldFilterSettings;
Bson_c ( tFieldFilterSettingsNode ).ForEach ( [&tFieldFilterSettings] ( const NodeHandle_t& tNode ) {
tFieldFilterSettings.m_dRegexps.Add ( String ( tNode ) );
} );
if ( !tFieldFilterSettings.m_dRegexps.IsEmpty() )
pFieldFilter = sphCreateRegexpFilter ( tFieldFilterSettings, m_sLastError );
}
if ( !sphSpawnFilterICU ( pFieldFilter, m_tSettings, tTokSettings, sHeaderName.cstr(), m_sLastError ) )
return LOAD_E::GeneralError_e;
if ( !SpawnFilterJieba ( pFieldFilter, m_tSettings, tTokSettings, sHeaderName.cstr(), pFilenameBuilder, m_sLastError ) )
return LOAD_E::GeneralError_e;
SetFieldFilter ( std::move ( pFieldFilter ) );
auto tIndexFieldsLenNode = tBson.ChildByName ( "index_fields_lens" );
if ( m_tSettings.m_bIndexFieldLens )
{
assert (!IsNullNode ( tIndexFieldsLenNode ));
m_dFieldLens.Reset ( m_tSchema.GetFieldsCount() );
int i = 0;
Bson_c ( tIndexFieldsLenNode ).ForEach ( [&i,this] ( const NodeHandle_t& tNode ) {
m_dFieldLens[i++] = Int ( tNode );
} );
}
// post-load stuff.. for now, bigrams
CSphIndexSettings & s = m_tSettings;
if ( s.m_eBigramIndex!=SPH_BIGRAM_NONE && s.m_eBigramIndex!=SPH_BIGRAM_ALL )
{
BYTE * pTok;
m_pTokenizer->SetBuffer ( (BYTE*)const_cast<char*> ( s.m_sBigramWords.cstr() ), s.m_sBigramWords.Length() );
while ( ( pTok = m_pTokenizer->GetToken() )!=nullptr )
s.m_dBigramWords.Add() = (const char*)pTok;
s.m_dBigramWords.Sort();
}
return LOAD_E::Ok_e;
}
void CSphIndex_VLN::DebugDumpHeader ( FILE * fp, const CSphString& sHeaderName, bool bConfig )
{
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder;
if ( GetIndexFilenameBuilder() )
pFilenameBuilder = GetIndexFilenameBuilder() ( GetName() );
CSphEmbeddedFiles tEmbeddedFiles;
CSphString sWarning;
auto eRes = LoadHeaderJson ( sHeaderName, false, tEmbeddedFiles, pFilenameBuilder.get(), sWarning );
if ( eRes == LOAD_E::ParseError_e )
{
eRes = LoadHeaderLegacy ( sHeaderName, false, tEmbeddedFiles, pFilenameBuilder.get(), sWarning );
if ( eRes == LOAD_E::ParseError_e )
sphDie ( "failed to load header: %s", m_sLastError.cstr() );
}
if ( eRes == LOAD_E::GeneralError_e )
sphDie ( "failed to load header: %s", m_sLastError.cstr() );
assert ( eRes == LOAD_E::Ok_e );
if ( !sWarning.IsEmpty () )
fprintf ( fp, "WARNING: %s\n", sWarning.cstr () );
///////////////////////////////////////////////
// print header in index config section format
///////////////////////////////////////////////
if ( bConfig )
{
fprintf ( fp, "\nsource $dump\n{\n" );
fprintf ( fp, "\tsql_query = SELECT id \\\n" );
for ( int i=0; i < m_tSchema.GetFieldsCount(); i++ )
fprintf ( fp, "\t, %s \\\n", m_tSchema.GetFieldName(i) );
for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
fprintf ( fp, "\t, %s \\\n", tAttr.m_sName.cstr() );
}
fprintf ( fp, "\tFROM documents\n" );
if ( m_tSchema.GetAttrsCount() )
fprintf ( fp, "\n" );
for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
if ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET )
fprintf ( fp, "\tsql_attr_multi = uint %s from field\n", tAttr.m_sName.cstr() );
else if ( tAttr.m_eAttrType==SPH_ATTR_INT64SET )
fprintf ( fp, "\tsql_attr_multi = bigint %s from field\n", tAttr.m_sName.cstr() );
else if ( tAttr.m_eAttrType==SPH_ATTR_INTEGER && tAttr.m_tLocator.IsBitfield() )
fprintf ( fp, "\tsql_attr_uint = %s:%d\n", tAttr.m_sName.cstr(), tAttr.m_tLocator.m_iBitCount );
else if ( tAttr.m_eAttrType==SPH_ATTR_TOKENCOUNT )
{ // intentionally skipped, as these are autogenerated by index_field_lengths=1
} else
fprintf ( fp, "\t%s = %s\n", sphTypeDirective ( tAttr.m_eAttrType ), tAttr.m_sName.cstr() );
}
fprintf ( fp, "}\n\nindex $dump\n{\n\tsource = $dump\n\tpath = $dump\n" );
DumpSettingsCfg ( fp, *this, pFilenameBuilder.get() );
fprintf ( fp, "}\n" );
return;
}
///////////////////////////////////////////////
// print header and stats in "readable" format
///////////////////////////////////////////////
fprintf ( fp, "version: %u\n", m_uVersion );
fprintf ( fp, "idbits: 64\n" );
fprintf ( fp, "fields: %d\n", m_tSchema.GetFieldsCount() );
for ( int i = 0; i < m_tSchema.GetFieldsCount(); i++ )
fprintf ( fp, " field %d: %s\n", i, m_tSchema.GetFieldName(i) );
fprintf ( fp, "attrs: %d\n", m_tSchema.GetAttrsCount() );
for ( int i=0; i<m_tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = m_tSchema.GetAttr(i);
fprintf ( fp, " attr %d: %s, %s", i, tAttr.m_sName.cstr(), sphTypeName ( tAttr.m_eAttrType ) );
if ( tAttr.m_eAttrType==SPH_ATTR_INTEGER && tAttr.m_tLocator.m_iBitCount!=32 )
fprintf ( fp, ", bits %d", tAttr.m_tLocator.m_iBitCount );
fprintf ( fp, ", bitoff %d\n", tAttr.m_tLocator.m_iBitOffset );
}
// skipped min doc, wordlist checkpoints
fprintf ( fp, "total-documents: " INT64_FMT "\n", m_tStats.m_iTotalDocuments );
fprintf ( fp, "total-bytes: " INT64_FMT "\n", int64_t(m_tStats.m_iTotalBytes) );
DumpReadable ( fp, *this, tEmbeddedFiles, pFilenameBuilder.get() );
fprintf ( fp, "\nmin-max-index: " INT64_FMT "\n", m_iMinMaxIndex );
}
void CSphIndex_VLN::DebugDumpDocids ( FILE * fp )
{
const int iRowStride = m_tSchema.GetRowSize();
const int64_t iNumMinMaxRow = (m_iDocinfoIndex+1)*iRowStride*2;
const int64_t iNumRows = m_iDocinfo;
const int64_t iDocinfoSize = iRowStride*m_iDocinfo*sizeof(DWORD);
const int64_t iMinmaxSize = iNumMinMaxRow*sizeof(CSphRowitem);
fprintf ( fp, "docinfo-bytes: docinfo=" INT64_FMT ", min-max=" INT64_FMT ", total=" UINT64_FMT "\n", iDocinfoSize, iMinmaxSize, (uint64_t)m_tAttr.GetLengthBytes() );
fprintf ( fp, "docinfo-stride: %d\n", (int)(iRowStride*sizeof(DWORD)) );
fprintf ( fp, "docinfo-rows: " INT64_FMT "\n", iNumRows );
if ( !m_tAttr.GetLength64() )
return;
const DWORD * pDocinfo = m_tAttr.GetReadPtr();
for ( int64_t iRow=0; iRow<iNumRows; iRow++, pDocinfo+=iRowStride )
printf ( INT64_FMT". id=" INT64_FMT "\n", iRow+1, sphGetDocID ( pDocinfo ) );
printf ( "--- min-max=" INT64_FMT " ---\n", iNumMinMaxRow );
for ( int64_t iRow=0; iRow<(m_iDocinfoIndex+1)*2; iRow++, pDocinfo+=iRowStride )
printf ( "id=" INT64_FMT "\n", sphGetDocID ( pDocinfo ) );
}
void CSphIndex_VLN::DebugDumpHitlist ( FILE * fp, const char * sKeyword, bool bID )
{
WITH_QWORD ( this, false, Qword, DumpHitlist<Qword> ( fp, sKeyword, bID ) );
}
template < class Qword >
void CSphIndex_VLN::DumpHitlist ( FILE * fp, const char * sKeyword, bool bID )
{
// get keyword id
SphWordID_t uWordID = 0;
BYTE * sTok = NULL;
if ( !bID )
{
CSphString sBuf ( sKeyword );
m_pTokenizer->SetBuffer ( (const BYTE*)sBuf.cstr(), (int) strlen ( sBuf.cstr() ) );
sTok = m_pTokenizer->GetToken();
if ( !sTok )
sphDie ( "keyword=%s, no token (too short?)", sKeyword );
uWordID = m_pDict->GetWordID ( sTok );
if ( !uWordID )
sphDie ( "keyword=%s, tok=%s, no wordid (stopped?)", sKeyword, sTok );
fprintf ( fp, "keyword=%s, tok=%s, wordid=" UINT64_FMT "\n", sKeyword, sTok, uint64_t(uWordID) );
} else
{
uWordID = (SphWordID_t) strtoull ( sKeyword, NULL, 10 );
if ( !uWordID )
sphDie ( "failed to convert keyword=%s to id (must be integer)", sKeyword );
fprintf ( fp, "wordid=" UINT64_FMT "\n", uint64_t(uWordID) );
}
// open files
DataReaderFactoryPtr_c pDoclist {
NewProxyReader ( GetFilename ( SPH_EXT_SPD ), m_sLastError, DataReaderFactory_c::DOCS,
m_tMutableSettings.m_tFileAccess.m_iReadBufferDocList, FileAccess_e::FILE )
};
if ( !pDoclist )
sphDie ( "failed to open doclist: %s", m_sLastError.cstr() );
DataReaderFactoryPtr_c pHitlist {
NewProxyReader ( GetFilename ( SPH_EXT_SPP ), m_sLastError, DataReaderFactory_c::HITS,
m_tMutableSettings.m_tFileAccess.m_iReadBufferHitList, FileAccess_e::FILE )
};
if ( !pHitlist )
sphDie ( "failed to open hitlist: %s", m_sLastError.cstr ());
// aim
DiskIndexQwordSetup_c tTermSetup ( pDoclist, pHitlist, m_tSkiplists.GetReadPtr(), m_tSettings.m_iSkiplistBlockSize, true, RowID_t(m_iDocinfo) );
tTermSetup.SetDict ( m_pDict );
tTermSetup.m_pIndex = this;
Qword tKeyword ( false, false, m_iIndexId );
tKeyword.m_uWordID = uWordID;
tKeyword.m_sWord = sKeyword;
tKeyword.m_sDictWord = (const char *)sTok;
if ( !tTermSetup.QwordSetup ( &tKeyword ) )
sphDie ( "failed to setup keyword" );
// press play on tape
while (true)
{
tKeyword.GetNextDoc();
if ( tKeyword.m_tDoc.m_tRowID==INVALID_ROWID )
break;
tKeyword.SeekHitlist ( tKeyword.m_iHitlistPos );
int iHits = 0;
if ( tKeyword.m_bHasHitlist )
for ( Hitpos_t uHit = tKeyword.GetNextHit(); uHit!=EMPTY_HIT; uHit = tKeyword.GetNextHit() )
{
fprintf ( fp, "doc=%u, hit=0x%08x\n", tKeyword.m_tDoc.m_tRowID, (uint32_t)uHit );
iHits++;
}
if ( !iHits )
{
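// the topmost bit of the stored hitlist position is the inline flag; mask it off to get the real offset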
uint64_t uOff = tKeyword.m_iHitlistPos;
fprintf ( fp, "doc=%u, NO HITS, inline=%d, off=" UINT64_FMT "\n", tKeyword.m_tDoc.m_tRowID, (int)(uOff>>63), (uOff<<1)>>1 );
}
}
}
void CSphIndex_VLN::DebugDumpDict ( FILE * fp, bool bDumpOnly )
{
if ( !m_pDict->GetSettings().m_bWordDict )
{
sphDie ( "DebugDumpDict() only supports dict=keywords for now" );
}
if ( !bDumpOnly )
fprintf ( fp, "keyword,docs,hits,offset\n" );
m_tWordlist.DebugPopulateCheckpoints();
ARRAY_FOREACH ( i, m_tWordlist.m_dCheckpoints )
{
KeywordsBlockReader_c tCtx ( m_tWordlist.AcquireDict ( &m_tWordlist.m_dCheckpoints[i] ), m_tSettings.m_iSkiplistBlockSize );
while ( tCtx.UnpackWord() )
fprintf ( fp, "%s,%d,%d," INT64_FMT "\n", tCtx.GetWord(), tCtx.m_iDocs, tCtx.m_iHits, int64_t(tCtx.m_iDoclistOffset) );
}
}
//////////////////////////////////////////////////////////////////////////
bool CSphIndex_VLN::SpawnReader ( DataReaderFactoryPtr_c & m_pFile, ESphExt eExt, DataReaderFactory_c::Kind_e eKind, int iBuffer, FileAccess_e eAccess )
{
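// spawn the reader right away only when preopen is requested or access is not plain file reads;
// otherwise it stays null here (and is presumably created on demand at query time)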
if ( m_tMutableSettings.m_bPreopen || eAccess!=FileAccess_e::FILE )
{
pFile = NewProxyReader ( GetFilename ( eExt ), m_sLastError, eKind, iBuffer, eAccess );
if ( !pFile )
return false;
}
return true;
}
bool CSphIndex_VLN::SpawnReaders()
{
if ( !SpawnReader ( m_pDoclistFile, SPH_EXT_SPD, DataReaderFactory_c::DOCS, m_tMutableSettings.m_tFileAccess.m_iReadBufferDocList, m_tMutableSettings.m_tFileAccess.m_eDoclist ) )
return false;
if ( !SpawnReader ( m_pHitlistFile, SPH_EXT_SPP, DataReaderFactory_c::HITS, m_tMutableSettings.m_tFileAccess.m_iReadBufferHitList, m_tMutableSettings.m_tFileAccess.m_eHitlist ) )
return false;
return true;
}
bool CSphIndex_VLN::PreallocWordlist()
{
if ( !sphIsReadable ( GetFilename ( SPH_EXT_SPI ), &m_sLastError ) )
return false;
assert ( m_pDict );
bool bWordDict = m_pDict->GetSettings().m_bWordDict;
// only checkpoint and wordlist infixes are actually read here; dictionary itself is just mapped
if ( !m_tWordlist.Preread ( GetFilename ( SPH_EXT_SPI ), bWordDict, m_tSettings.m_iSkiplistBlockSize, m_sLastError ) )
return false;
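// sanity check: an empty dictionary keeps only a small (up to 18 bytes) header, so a larger buffer is expected to come with checkpoints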
if ( ( m_tWordlist.m_tBuf.GetLengthBytes()<=18 )!=( m_tWordlist.m_dCheckpoints.GetLength()==0 ) )
sphWarning ( "wordlist size mismatch (size=%zu, checkpoints=%d)", m_tWordlist.m_tBuf.GetLengthBytes(), m_tWordlist.m_dCheckpoints.GetLength() );
// make sure checkpoints are loadable
// pre-11 indices use a different offset type (this is fixed up later during loading)
assert ( m_tWordlist.m_iDictCheckpointsOffset>0 );
return true;
}
bool CSphIndex_VLN::PreallocAttributes()
{
if ( m_bIsEmpty || m_bDebugCheck )
return true;
if ( !m_tSchema.HasNonColumnarAttrs() )
return true;
if ( !m_tAttr.Setup ( GetFilename ( SPH_EXT_SPA ), m_sLastError, true ) )
return false;
if ( !CheckDocsCount ( m_iDocinfo, m_sLastError ) )
return false;
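// the per-block min/max rows live in the same .spa mapping, right after the document rows (m_iMinMaxIndex rowitems in)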
m_pDocinfoIndex = m_tAttr.GetWritePtr() + m_iMinMaxIndex;
if ( m_tSchema.GetAttr ( sphGetBlobLocatorName() ) )
{
if ( !m_tBlobAttrs.Setup ( GetFilename ( SPH_EXT_SPB ), m_sLastError, true ) )
return false;
}
return true;
}
bool CSphIndex_VLN::PreallocDocidLookup()
{
if ( m_bIsEmpty || m_bDebugCheck )
return true;
if ( !m_tDocidLookup.Setup ( GetFilename ( SPH_EXT_SPT ), m_sLastError, false ) )
return false;
m_tLookupReader.SetData ( m_tDocidLookup.GetReadPtr() );
return true;
}
bool CSphIndex_VLN::PreallocKilllist()
{
if ( m_bDebugCheck )
return true;
return m_tDeadRowMap.Prealloc ( (DWORD)m_iDocinfo, GetFilename ( SPH_EXT_SPM ), m_sLastError );
}
bool CSphIndex_VLN::PreallocHistograms ( StrVec_t & dWarnings )
{
if ( m_bDebugCheck )
return true;
// we have histograms since v.56, but in v.61 we switched to streamed histograms with no backward compatibility
if ( m_uVersion<61 )
return true;
CSphString sHistogramFile = GetFilename ( SPH_EXT_SPHI );
if ( !sphIsReadable ( sHistogramFile.cstr() ) )
return true;
SafeDelete ( m_pHistograms );
m_pHistograms = new HistogramContainer_c;
if ( !m_pHistograms->Load ( sHistogramFile, m_sLastError ) )
{
SafeDelete ( m_pHistograms );
if ( !m_sLastError.IsEmpty() )
dWarnings.Add(m_sLastError);
}
// even if we fail to load the histograms, return true (histograms are optional anyway)
return true;
}
bool CSphIndex_VLN::PreallocDocstore()
{
if ( m_uVersion<57 )
return true;
if ( !m_tSchema.HasStoredFields() && !m_tSchema.HasStoredAttrs() )
return true;
m_pDocstore = CreateDocstore ( m_iIndexId, GetFilename ( SPH_EXT_SPDS ), m_sLastError );
return !!m_pDocstore;
}
bool CSphIndex_VLN::PreallocColumnar()
{
if ( m_uVersion<61 )
return true;
if ( !m_tSchema.HasColumnarAttrs() )
return true;
m_pColumnar = CreateColumnarStorageReader ( GetFilename ( SPH_EXT_SPC ), (DWORD)m_iDocinfo, m_sLastError );
return !!m_pColumnar;
}
bool CSphIndex_VLN::PreallocKNN()
{
if ( m_uVersion<65 )
return true;
if ( !m_tSchema.HasKNNAttrs() )
return true;
m_pKNN = CreateKNN(m_sLastError);
if ( !m_pKNN )
return false;
std::string sErrorSTL;
if ( !m_pKNN->Load ( GetFilename ( SPH_EXT_SPKNN ).cstr(), sErrorSTL ) )
{
m_sLastError = sErrorSTL.c_str();
return false;
}
return true;
}
bool CSphIndex_VLN::PreallocSkiplist()
{
if ( m_bDebugCheck )
return true;
return m_tSkiplists.Setup ( GetFilename ( SPH_EXT_SPE ), m_sLastError, false );
}
bool CSphIndex_VLN::LoadSecondaryIndex ( const CSphString & sFile )
{
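// policy: a missing (or unloadable) secondary index is fatal only when secondary indexes are forced; otherwise warn and carry on without them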
if ( !sphFileExists ( sFile.cstr() ) )
{
if ( GetSecondaryIndexDefault()!=SIDefault_e::DISABLED )
{
if ( GetSecondaryIndexDefault()==SIDefault_e::FORCE )
m_sLastError.SetSprintf ( "missing secondary index %s", sFile.cstr() );
else
sphWarning ( "missing %s; secondary index(es) disabled, consider using ALTER REBUILD SECONDARY to recover the secondary index", sFile.cstr() );
}
return GetSecondaryIndexDefault()!=SIDefault_e::FORCE;
}
if ( !m_tSI.Load ( sFile, m_sLastError ) && GetSecondaryIndexDefault()!=SIDefault_e::DISABLED )
{
if ( GetSecondaryIndexDefault()!=SIDefault_e::FORCE )
{
sphWarning ( "'%s': secondary index not loaded, %s; secondary index(es) disabled, consider using ALTER REBUILD SECONDARY to recover the secondary index", GetName(), m_sLastError.cstr() );
m_sLastError = "";
}
if ( GetSecondaryIndexDefault()==SIDefault_e::FORCE )
return false;
}
return true;
}
bool CSphIndex_VLN::PreallocSecondaryIndex()
{
if ( m_uVersion<61 )
return true;
if ( !IsSecondaryLibLoaded() )
{
if ( GetSecondaryIndexDefault()!=SIDefault_e::DISABLED )
{
if ( GetSecondaryIndexDefault()==SIDefault_e::FORCE )
m_sLastError = "secondary library not loaded";
else
sphWarning ( "secondary library not loaded; secondary index(es) disabled" );
}
return ( GetSecondaryIndexDefault()!=SIDefault_e::FORCE );
}
if ( !LoadSecondaryIndex ( GetFilename(SPH_EXT_SPIDX) ) )
return false;
if ( m_tSchema.HasJsonSIAttrs() && !LoadSecondaryIndex ( GetFilename(SPH_EXT_SPJIDX) ) )
return false;
return true;
}
bool CSphIndex_VLN::Prealloc ( bool bStripPath, FilenameBuilder_i * pFilenameBuilder, StrVec_t & dWarnings )
{
MEMORY ( MEM_INDEX_DISK );
Dealloc();
CSphEmbeddedFiles tEmbeddedFiles;
// preload schema
auto eRes = LoadHeaderJson ( GetFilename ( SPH_EXT_SPH ), bStripPath, tEmbeddedFiles, pFilenameBuilder, m_sLastWarning ) ;
if ( eRes == LOAD_E::ParseError_e )
{
sphInfo ( "Index header format is not json, will try it as binary..." );
eRes = LoadHeaderLegacy ( GetFilename ( SPH_EXT_SPH ), bStripPath, tEmbeddedFiles, pFilenameBuilder, m_sLastWarning );
if ( eRes == LOAD_E::ParseError_e )
{
sphWarning ( "Unable to parse header... Error %s", m_sLastError.cstr() );
return false;
}
}
if ( eRes == LOAD_E::GeneralError_e )
{
sphWarning ( "Unable to load header... Error %s", m_sLastError.cstr() );
return false;
}
assert ( eRes == LOAD_E::Ok_e );
m_bIsEmpty = !m_iDocinfo;
tEmbeddedFiles.Reset();
// verify that data files are readable
for ( auto& eExt : { SPH_EXT_SPD, SPH_EXT_SPP, SPH_EXT_SPE } )
if ( !sphIsReadable ( GetFilename ( eExt ), &m_sLastError ) )
return false;
if ( !SpawnReaders() )
return false;
if ( !PreallocWordlist() ) return false;
if ( !PreallocAttributes() ) return false;
if ( !PreallocDocidLookup() ) return false;
if ( !PreallocKilllist() ) return false;
if ( !PreallocHistograms(dWarnings) ) return false;
if ( !PreallocDocstore() ) return false;
if ( !PreallocColumnar() ) return false;
if ( !PreallocKNN() ) return false;
if ( !PreallocSkiplist() ) return false;
if ( !PreallocSecondaryIndex() ) return false;
// almost done
m_bPassedAlloc = true;
return true;
}
void CSphIndex_VLN::Preread()
{
MEMORY ( MEM_INDEX_DISK );
sphLogDebug ( "CSphIndex_VLN::Preread invoked '%s'(%s)", GetName(), GetFilebase() );
assert ( m_bPassedAlloc );
if ( m_bPassedRead )
return;
///////////////////
// read everything
///////////////////
PrereadMapping ( GetName(), "attributes", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eAttr ), IsOndisk ( m_tMutableSettings.m_tFileAccess.m_eAttr ), m_tAttr );
if ( sphInterrupted() ) return;
PrereadMapping ( GetName(), "blobs", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eBlob ), IsOndisk ( m_tMutableSettings.m_tFileAccess.m_eBlob ), m_tBlobAttrs );
if ( sphInterrupted() ) return;
PrereadMapping ( GetName(), "skip-list", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eAttr ), false, m_tSkiplists );
if ( sphInterrupted() ) return;
PrereadMapping ( GetName(), "dictionary", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eDict ), IsOndisk ( m_tMutableSettings.m_tFileAccess.m_eDict ), m_tWordlist.m_tBuf );
if ( sphInterrupted() ) return;
PrereadMapping ( GetName(), "docid-lookup", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eAttr ), false, m_tDocidLookup );
if ( sphInterrupted() ) return;
m_tDeadRowMap.Preread ( GetName(), "kill-list", IsMlock ( m_tMutableSettings.m_tFileAccess.m_eAttr ) );
if ( sphInterrupted() ) return;
m_bPassedRead = true;
sphLogDebug ( "Preread successfully finished" );
}
CSphIndex::RenameResult_e CSphIndex_VLN::RenameEx ( CSphString sNewBase )
{
if ( sNewBase == GetFilebase() )
return RE_OK;
IndexFiles_c dFiles ( GetFilebase(), nullptr, m_uVersion );
if ( !dFiles.TryRenameBase ( sNewBase ) )
{
m_sLastError = dFiles.ErrorMsg ();
return dFiles.IsFatal() ? RE_FATAL : RE_FAIL;
}
if ( !dFiles.RenameLock ( sNewBase, m_iLockFD ) )
{
m_sLastError = dFiles.ErrorMsg ();
return dFiles.IsFatal() ? RE_FATAL : RE_FAIL;
}
SetFilebase ( std::move ( sNewBase ) );
return RE_OK;
}
bool CSphIndex::IsStarDict ( bool bWordDict ) const
{
return m_tSettings.GetMinPrefixLen ( bWordDict )>0 || m_tSettings.m_iMinInfixLen>0;
}
void CSphIndex_VLN::SetupStarDict ( DictRefPtr_c &pDict ) const
{
// spawn wrapper, and put it in the box
// wrapper type depends on version; v.8 introduced new mangling rules
if ( IsStarDict ( pDict->GetSettings().m_bWordDict ) )
::SetupStarDictV8 ( pDict, m_tSettings.m_iMinInfixLen>0 );
// FIXME? might wanna verify somehow that the tokenizer has '*' as a character
}
void CSphIndex_VLN::SetupExactDict ( DictRefPtr_c &pDict ) const
{
if ( m_tSettings.m_bIndexExactWords )
::SetupExactDict ( pDict );
}
bool CSphIndex_VLN::GetKeywords ( CSphVector <CSphKeywordInfo> & dKeywords, const char * szQuery, const GetKeywordsSettings_t & tSettings, CSphString * pError ) const
{
WITH_QWORD ( this, false, Qword, return DoGetKeywords<Qword> ( dKeywords, szQuery, tSettings, false, pError ) );
return false;
}
void CSphIndex_VLN::GetSuggest ( const SuggestArgs_t & tArgs, SuggestResult_t & tRes ) const
{
if ( m_tWordlist.m_tBuf.IsEmpty() || !m_tWordlist.m_dCheckpoints.GetLength() )
return;
assert ( !tRes.m_pWordReader );
tRes.m_pWordReader = new KeywordsBlockReader_c ( m_tWordlist.m_tBuf.GetReadPtr(), m_tSettings.m_iSkiplistBlockSize );
tRes.m_bHasExactDict = m_tSettings.m_bIndexExactWords;
sphGetSuggest ( &m_tWordlist, m_tWordlist.m_iInfixCodepointBytes, tArgs, tRes );
KeywordsBlockReader_c * pReader = (KeywordsBlockReader_c *)tRes.m_pWordReader;
SafeDelete ( pReader );
tRes.m_pWordReader = NULL;
}
DWORD sphParseMorphAot ( const char * sMorphology )
{
if ( !sMorphology || !*sMorphology )
return 0;
StrVec_t dMorphs;
sphSplit ( dMorphs, sMorphology );
DWORD uAotFilterMask = 0;
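// bit j of the resulting mask is set when the morphology list contains lemmatize_<lang>_all for AOT language j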
for ( int j=0; j<AOT_LENGTH; ++j )
{
char buf_all[20];
snprintf ( buf_all, 19, "lemmatize_%s_all", AOT_LANGUAGES[j] ); // NOLINT
buf_all[19] = '\0';
ARRAY_FOREACH ( i, dMorphs )
{
if ( dMorphs[i]==buf_all )
{
uAotFilterMask |= (1UL) << j;
break;
}
}
}
return uAotFilterMask;
}
template < class Qword >
bool CSphIndex_VLN::DoGetKeywords ( CSphVector <CSphKeywordInfo> & dKeywords, const char * szQuery, const GetKeywordsSettings_t & tSettings, bool bFillOnly, CSphString * pError ) const
{
if ( !bFillOnly )
dKeywords.Resize ( 0 );
if ( !m_bPassedAlloc )
{
if ( pError )
*pError = "table not preread";
return false;
}
// short-cut if no query or keywords to fill
if ( ( bFillOnly && !dKeywords.GetLength() ) || ( !bFillOnly && ( !szQuery || !szQuery[0] ) ) )
return true;
DictRefPtr_c pDict = GetStatelessDict ( m_pDict );
SetupStarDict ( pDict );
SetupExactDict ( pDict );
CSphVector<BYTE> dFiltered;
const BYTE * sModifiedQuery = (const BYTE *)szQuery;
if ( m_pFieldFilter && szQuery && m_pFieldFilter->Clone()->Apply ( sModifiedQuery, dFiltered, true ) )
sModifiedQuery = dFiltered.Begin();
// FIXME!!! missed bigram, add flags to fold blended parts, show expanded terms
// prepare for setup
DiskIndexQwordSetup_c tTermSetup ( DataReaderFactoryPtr_c{}, DataReaderFactoryPtr_c{}, m_tSkiplists.GetReadPtr(), m_tSettings.m_iSkiplistBlockSize, false, RowID_t(m_iDocinfo) );
tTermSetup.SetDict ( pDict );
tTermSetup.m_pIndex = this;
Qword tQueryWord ( false, false, m_iIndexId );
if ( bFillOnly )
{
BYTE sWord[MAX_KEYWORD_BYTES];
ARRAY_FOREACH ( i, dKeywords )
{
CSphKeywordInfo &tInfo = dKeywords[i];
int iLen = tInfo.m_sTokenized.Length ();
memcpy ( sWord, tInfo.m_sTokenized.cstr (), iLen );
sWord[iLen] = '\0';
SphWordID_t iWord = pDict->GetWordID ( sWord );
if ( iWord )
{
tQueryWord.Reset ();
tQueryWord.m_sWord = tInfo.m_sTokenized;
tQueryWord.m_sDictWord = ( const char * ) sWord;
tQueryWord.m_uWordID = iWord;
tTermSetup.QwordSetup ( &tQueryWord );
tInfo.m_iDocs += tQueryWord.m_iDocs;
tInfo.m_iHits += tQueryWord.m_iHits;
}
}
return true;
}
bool bWordDict = pDict->GetSettings ().m_bWordDict;
TokenizerRefPtr_c pTokenizer = m_pQueryTokenizer->Clone ( SPH_CLONE );
pTokenizer->EnableTokenizedMultiformTracking ();
// need to support '*' and '=' but not the other specials
// so m_pQueryTokenizer does not work for us; gotta clone and set up one manually
if ( IsStarDict ( bWordDict ) )
pTokenizer->AddPlainChars ( "*" );
if ( m_tSettings.m_bIndexExactWords )
pTokenizer->AddSpecials ( "=" );
if ( !m_tSettings.m_sIndexTokenFilter.IsEmpty() )
{
CSphString sError;
Tokenizer::AddPluginFilterTo ( pTokenizer, m_tSettings.m_sIndexTokenFilter, sError );
if ( !sError.IsEmpty() )
{
if ( pError )
*pError = sError;
return false;
}
if ( !pTokenizer->SetFilterSchema ( m_tSchema, sError ) )
{
if ( pError )
*pError = sError;
return false;
}
}
pTokenizer->SetBuffer ( sModifiedQuery, (int)strlen ( (const char*)sModifiedQuery ) );
ExpansionContext_t tExpCtx;
// query-defined options take precedence over the table default
tExpCtx.m_iExpansionLimit = ( tSettings.m_iExpansionLimit ? tSettings.m_iExpansionLimit : m_iExpansionLimit );
bool bExpandWildcards = ( bWordDict && IsStarDict ( bWordDict ) && !tSettings.m_bFoldWildcards );
CSphPlainQueryFilter tAotFilter;
tAotFilter.m_pTokenizer = std::move ( pTokenizer );
tAotFilter.m_pDict = std::move ( pDict );
tAotFilter.m_pSettings = &m_tSettings;
tAotFilter.m_pTermSetup = &tTermSetup;
tAotFilter.m_pQueryWord = &tQueryWord;
tAotFilter.m_tFoldSettings = tSettings;
tAotFilter.m_tFoldSettings.m_bFoldWildcards = !bExpandWildcards;
tExpCtx.m_pWordlist = &m_tWordlist;
tExpCtx.m_iMinPrefixLen = m_tSettings.GetMinPrefixLen ( bWordDict );
tExpCtx.m_iMinInfixLen = m_tSettings.m_iMinInfixLen;
tExpCtx.m_bHasExactForms = ( m_pDict->HasMorphology() || m_tSettings.m_bIndexExactWords );
tExpCtx.m_bMergeSingles = false;
tExpCtx.m_eHitless = m_tSettings.m_eHitless;
tExpCtx.m_iCutoff = tSettings.m_iCutoff;
tExpCtx.m_bAllowExpansion = tSettings.m_bAllowExpansion;
tAotFilter.GetKeywords ( dKeywords, tExpCtx );
return true;
}
bool CSphIndex_VLN::FillKeywords ( CSphVector <CSphKeywordInfo> & dKeywords ) const
{
GetKeywordsSettings_t tSettings;
tSettings.m_bStats = true;
WITH_QWORD ( this, false, Qword, return DoGetKeywords<Qword> ( dKeywords, NULL, tSettings, true, NULL ) );
return false;
}
static int sphQueryHeightCalc ( const XQNode_t * pNode )
{
if ( pNode->m_dChildren.IsEmpty() )
{
// exception, pre-cached OR of tiny (rare) keywords is just one node
if ( pNode->GetOp()==SPH_QUERY_OR )
{
#ifndef NDEBUG
// sanity checks
// this node must be only created for a huge OR of tiny expansions
assert ( pNode->m_dWords.GetLength() );
ARRAY_FOREACH ( i, pNode->m_dWords )
{
assert ( pNode->m_dWords[i].m_iAtomPos==pNode->m_dWords[0].m_iAtomPos );
assert ( pNode->m_dWords[i].m_bExpanded );
}
#endif
return 1;
}
return pNode->m_dWords.GetLength();
}
if ( pNode->GetOp()==SPH_QUERY_BEFORE )
return 1;
int iMaxChild = 0;
int iHeight = 0;
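// worst-case evaluation depth: each child contributes its own subtree height plus the number of right siblings still pending on the stack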
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
int iBottom = sphQueryHeightCalc ( pNode->m_dChildren[i] );
int iTop = pNode->m_dChildren.GetLength()-i-1;
if ( iBottom+iTop>=iMaxChild+iHeight )
{
iMaxChild = iBottom;
iHeight = iTop;
}
}
return iMaxChild+iHeight;
}
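// rough per-ExtNode stack cost in bytes, tuned per compiler/platform (can be overridden via SetExtNodeStackSize below)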
#if defined( __clang__ )
#if defined( __x86_64__ )
static int SPH_EXTNODE_STACK_SIZE = 0x140;
#else // if defined ( __ARM_ARCH_ISA_A64 ) and all the others
static int SPH_EXTNODE_STACK_SIZE = 0x160;
#endif
#elif defined( _WIN32 )
static int SPH_EXTNODE_STACK_SIZE = 630;
#else
static int SPH_EXTNODE_STACK_SIZE = 160;
#endif
// extra stack needed on top of EXTNODE_STACK_SIZE
static DWORD SPH_EXTRA_BUDGET = 0x2000;
void SetExtNodeStackSize ( int iDelta, int iExtra )
{
if ( iDelta )
{
SPH_EXTNODE_STACK_SIZE = iDelta;
#if defined( _WIN32 )
SPH_EXTNODE_STACK_SIZE += 0x80;
#endif
}
if ( iExtra )
SPH_EXTRA_BUDGET = iExtra;// + 0x100;
}
/*
Why EXTRA_BUDGET?
CREATE TABLE if not exists t ( id bigint, f text );
replace into t (id,f) values (1, 'a b');
flush rtindex t;
crash:
SELECT * FROM t WHERE MATCH('(a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b b "a b") | ( a -b )');
WARNING: Stack used 42409, need 88000, sum 130409, have 131072
(data obtained on a debug build with clang 14 on Linux)
Strictly speaking, we should mock it - create an RT table, then query - without any disk footprint.
You can see that having 131072 bytes is not enough to process a query which needs 130409. The tree size estimate is right, but extra space is needed - first, to move around inside query evaluation,
and also at the query leaves - reading docs/hits from disk involves extra functions for caching, filesystem work, profiling, etc. That is a one-shot extra budget on top of the calculated per-node expression cost.
*/
int ConsiderStackAbsolute ( const struct XQNode_t* pRoot )
{
int iHeight = 0;
if ( pRoot )
iHeight = sphQueryHeightCalc ( pRoot );
return iHeight * SPH_EXTNODE_STACK_SIZE + SPH_EXTRA_BUDGET;
}
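// returns -1 when the current stack already suffices, 0 on failure (query too complex), or the stack size to allocate for a new coro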
int ConsiderStack ( const struct XQNode_t * pRoot, CSphString & sError )
{
auto iStackNeed = ConsiderStackAbsolute ( pRoot );
int64_t iQueryStack = Threads::GetStackUsed() + iStackNeed;
// sphWarning ( "Stack used %d, need %d (%d * %d + %d), sum %d, have %d", (int)Threads::GetStackUsed(), iStackNeed, iHeight, SPH_EXTNODE_STACK_SIZE, SPH_EXTRA_BUDGET, (int)iQueryStack, Threads::MyStackSize() );
auto iMyStackSize = Threads::MyStackSize ();
if ( iMyStackSize>=iQueryStack )
return -1;
// reserve the stack of the tree plus 8K extra
// (being run in a new coro, you'll most probably start near the top of the stack, so 32K should be enough)
iQueryStack = iStackNeed + 8*1024;
if ( session::GetMaxStackSize()>=iQueryStack )
return (int)iQueryStack;
sError.SetSprintf ( "query too complex, not enough stack (thread_stack=%dK or higher required)", (int) (( iQueryStack+1024-( iQueryStack % 1024 )) / 1024 ));
return 0;
}
static XQNode_t * CloneKeyword ( const XQNode_t * pNode )
{
assert ( pNode );
XQNode_t * pRes = new XQNode_t ( pNode->m_dSpec );
pRes->m_dWords = pNode->m_dWords;
return pRes;
}
static XQNode_t * ExpandKeyword ( XQNode_t * pNode, const CSphIndexSettings & tSettings, int iExpandKeywords, bool bWordDict )
{
assert ( pNode );
if ( tSettings.m_bIndexExactWords && ( iExpandKeywords & KWE_MORPH_NONE )==KWE_MORPH_NONE )
{
if ( !pNode->m_dWords[0].m_sWord.Begins( "=" ) )
pNode->m_dWords[0].m_sWord.SetSprintf ( "=%s", pNode->m_dWords[0].m_sWord.cstr() );
return pNode;
}
// do not expand into wildcard words shorter than min_prefix_len or min_infix_len
bool bExpandInfix = false;
bool bExpandPrefix = false;
if ( ( iExpandKeywords & KWE_STAR )==KWE_STAR )
{
assert ( pNode->m_dChildren.GetLength()==0 );
assert ( pNode->m_dWords.GetLength()==1 );
int iLen = sphUTF8Len ( pNode->m_dWords[0].m_sWord.cstr() );
int iMinInfix = tSettings.m_iMinInfixLen;
int iMinPrefix = tSettings.RawMinPrefixLen();
if ( iMinInfix>0 && iLen>=iMinInfix )
bExpandInfix = true;
else if ( iMinPrefix>0 && iLen>=iMinPrefix )
bExpandPrefix = true;
}
bool bExpandExact = ( tSettings.m_bIndexExactWords && ( iExpandKeywords & KWE_EXACT )==KWE_EXACT );
if ( !bExpandInfix && !bExpandPrefix && !bExpandExact )
return pNode;
XQNode_t * pExpand = new XQNode_t ( pNode->m_dSpec );
pExpand->SetOp ( SPH_QUERY_OR, pNode );
if ( bExpandInfix )
{
assert ( pNode->m_dChildren.GetLength()==0 );
assert ( pNode->m_dWords.GetLength()==1 );
XQNode_t * pInfix = CloneKeyword ( pNode );
pInfix->m_dWords[0].m_sWord.SetSprintf ( "*%s*", pNode->m_dWords[0].m_sWord.cstr() );
pInfix->m_pParent = pExpand;
pExpand->m_dChildren.Add ( pInfix );
} else if ( bExpandPrefix )
{
assert ( pNode->m_dChildren.GetLength()==0 );
assert ( pNode->m_dWords.GetLength()==1 );
XQNode_t * pPrefix = CloneKeyword ( pNode );
pPrefix->m_dWords[0].m_sWord.SetSprintf ( "%s*", pNode->m_dWords[0].m_sWord.cstr() );
pPrefix->m_pParent = pExpand;
pExpand->m_dChildren.Add ( pPrefix );
}
if ( bExpandExact )
{
assert ( pNode->m_dChildren.GetLength()==0 );
assert ( pNode->m_dWords.GetLength()==1 );
XQNode_t * pExact = CloneKeyword ( pNode );
if ( pNode->m_dWords[0].m_sWord.Begins( "=" ) )
{
pExact->m_dWords[0].m_sWord = pNode->m_dWords[0].m_sWord;
} else
{
pExact->m_dWords[0].m_sWord.SetSprintf ( "=%s", pNode->m_dWords[0].m_sWord.cstr() );
}
pExact->m_pParent = pExpand;
pExpand->m_dChildren.Add ( pExact );
}
return pExpand;
}
static bool SkipExpand ( const CSphString & sWord )
{
return ( sWord.Begins("=") || sWord.Begins("*") || sWord.Ends("*") );
}
void sphQueryExpandKeywords ( XQNode_t ** ppNode, const CSphIndexSettings & tSettings, int iExpandKeywords, bool bWordDict )
{
assert ( ppNode );
assert ( *ppNode );
auto& pNode = *ppNode;
// only if expansion makes sense at all
assert ( tSettings.m_iMinInfixLen>0 || tSettings.GetMinPrefixLen ( bWordDict )>0 || tSettings.m_bIndexExactWords );
assert ( iExpandKeywords!=KWE_DISABLED );
// process children for composite nodes
if ( pNode->m_dChildren.GetLength() )
{
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
sphQueryExpandKeywords ( &pNode->m_dChildren[i], tSettings, iExpandKeywords, bWordDict );
pNode->m_dChildren[i]->m_pParent = pNode;
}
return;
}
// if that's a phrase/proximity node, create a very special, magic phrase/proximity node
if ( pNode->GetOp()==SPH_QUERY_PHRASE || pNode->GetOp()==SPH_QUERY_PROXIMITY || pNode->GetOp()==SPH_QUERY_QUORUM )
{
assert ( pNode->m_dWords.GetLength()>1 );
// should skip expansion if all terms have modifiers
if ( pNode->m_dWords.all_of ( [] ( const XQKeyword_t & tWord ) { return SkipExpand ( tWord.m_sWord ); } ) )
return;
ARRAY_FOREACH ( i, pNode->m_dWords )
{
auto * pWord = new XQNode_t ( pNode->m_dSpec );
pWord->m_dWords.Add ( pNode->m_dWords[i] );
// should not expand if word already has any modifiers
if ( SkipExpand ( pWord->m_dWords[0].m_sWord ) )
pNode->m_dChildren.Add ( pWord );
else
pNode->m_dChildren.Add ( ExpandKeyword ( pWord, tSettings, iExpandKeywords, bWordDict ) );
pNode->m_dChildren.Last()->m_iAtomPos = pNode->m_dWords[i].m_iAtomPos;
pNode->m_dChildren.Last()->m_pParent = pNode;
}
pNode->m_dWords.Reset();
pNode->m_bVirtuallyPlain = true;
return;
}
// skip empty plain nodes
if ( pNode->m_dWords.GetLength()<=0 )
return;
// process keywords for plain nodes
assert ( pNode->m_dWords.GetLength()==1 );
if ( SkipExpand ( pNode->m_dWords[0].m_sWord ) )
return;
// do the expansion
pNode = ExpandKeyword ( pNode, tSettings, iExpandKeywords, bWordDict );
}
// transform the "one two three"/1 quorum into one|two|three (~40% faster)
static void TransformQuorum ( XQNode_t ** ppNode )
{
XQNode_t *& pNode = *ppNode;
// recurse non-quorum nodes
if ( pNode->GetOp()!=SPH_QUERY_QUORUM )
{
ARRAY_FOREACH ( i, pNode->m_dChildren )
TransformQuorum ( &pNode->m_dChildren[i] );
return;
}
// skip quorums with thresholds other than 1
if ( pNode->m_iOpArg!=1 )
return;
// transform quorums with a threshold of 1 only
assert ( pNode->GetOp()==SPH_QUERY_QUORUM && pNode->m_dChildren.GetLength()==0 );
CSphVector<XQNode_t*> dArgs;
ARRAY_FOREACH ( i, pNode->m_dWords )
{
XQNode_t * pAnd = new XQNode_t ( pNode->m_dSpec );
pAnd->m_dWords.Add ( pNode->m_dWords[i] );
dArgs.Add ( pAnd );
}
pNode->m_dWords.Reset();
pNode->SetOp ( SPH_QUERY_OR, dArgs );
}
struct BinaryNode_t
{
int m_iLo;
int m_iHi;
};
static void BuildExpandedTree ( const XQKeyword_t & tRootWord, const ISphWordlist::Args_t & dWordSrc, XQNode_t * pRoot )
{
assert ( dWordSrc.m_dExpanded.GetLength() );
pRoot->m_dWords.Reset();
// build a binary tree from all the other expansions
CSphVector<BinaryNode_t> dNodes;
dNodes.Reserve ( dWordSrc.m_dExpanded.GetLength() );
XQNode_t * pCur = pRoot;
dNodes.Add();
dNodes.Last().m_iLo = 0;
dNodes.Last().m_iHi = ( dWordSrc.m_dExpanded.GetLength()-1 );
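// iterative midpoint split: each [lo,hi] range emits its middle expansion as a new OR child and pushes both halves onto the explicit dNodes stack, yielding a roughly balanced tree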
while ( dNodes.GetLength() )
{
BinaryNode_t tNode = dNodes.Pop();
if ( tNode.m_iHi<tNode.m_iLo )
{
pCur = pCur->m_pParent;
continue;
}
int iMid = ( tNode.m_iLo+tNode.m_iHi ) / 2;
dNodes.Add ();
dNodes.Last().m_iLo = tNode.m_iLo;
dNodes.Last().m_iHi = iMid-1;
dNodes.Add ();
dNodes.Last().m_iLo = iMid+1;
dNodes.Last().m_iHi = tNode.m_iHi;
if ( pCur->m_dWords.GetLength() )
{
assert ( pCur->m_dWords.GetLength()==1 );
XQNode_t * pTerm = CloneKeyword ( pRoot );
Swap ( pTerm->m_dWords, pCur->m_dWords );
pCur->m_dChildren.Add ( pTerm );
pTerm->m_pParent = pCur;
}
XQNode_t * pChild = CloneKeyword ( pRoot );
pChild->m_dWords.Add ( tRootWord );
pChild->m_dWords.Last().m_sWord = dWordSrc.GetWordExpanded ( iMid );
pChild->m_dWords.Last().m_bExpanded = true;
pChild->m_bNotWeighted = pRoot->m_bNotWeighted;
pChild->m_pParent = pCur;
pCur->m_dChildren.Add ( pChild );
pCur->SetOp ( SPH_QUERY_OR );
pCur = pChild;
}
}
static XQNode_t * ExpandXQNode ( const ExpansionContext_t & tCtx, ISphWordlist::Args_t & tArgs, XQNode_t * pNode );
/// do wildcard expansion for keywords dictionary
/// (including prefix and infix expansion)
XQNode_t * sphExpandXQNode ( XQNode_t * pNode, ExpansionContext_t & tCtx )
{
assert ( pNode );
// process children for composite nodes
if ( pNode->m_dChildren.GetLength() )
{
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
pNode->m_dChildren[i] = sphExpandXQNode ( pNode->m_dChildren[i], tCtx );
pNode->m_dChildren[i]->m_pParent = pNode;
}
return pNode;
}
// if that's a phrase/proximity node, create a very special, magic phrase/proximity node
if ( pNode->GetOp()==SPH_QUERY_PHRASE || pNode->GetOp()==SPH_QUERY_PROXIMITY || pNode->GetOp()==SPH_QUERY_QUORUM )
{
assert ( pNode->m_dWords.GetLength()>1 );
ARRAY_FOREACH ( i, pNode->m_dWords )
{
XQNode_t * pWord = new XQNode_t ( pNode->m_dSpec );
pWord->m_dWords.Add ( pNode->m_dWords[i] );
pNode->m_dChildren.Add ( sphExpandXQNode ( pWord, tCtx ) );
pNode->m_dChildren.Last()->m_iAtomPos = pNode->m_dWords[i].m_iAtomPos;
pNode->m_dChildren.Last()->m_pParent = pNode;
// tricky part
// current node may have field/zone limits attached
// normally those get pushed down during query parsing
// but here we create nodes manually and have to push down limits too
pWord->CopySpecs ( pNode );
}
pNode->m_dWords.Reset();
pNode->m_bVirtuallyPlain = true;
return pNode;
}
// skip empty plain nodes
if ( pNode->m_dWords.GetLength()<=0 )
return pNode;
// process keywords for plain nodes
assert ( pNode->m_dChildren.GetLength()==0 );
assert ( pNode->m_dWords.GetLength()==1 );
// this might be a pass that only fixes up the tree
if ( tCtx.m_bOnlyTreeFix )
return pNode;
assert ( tCtx.m_pResult );
// check the wildcards
const char * sFull = pNode->m_dWords[0].m_sWord.cstr();
const bool bRegex = ( pNode->m_dWords[0].m_bRegex );
// no wildcards, or just wildcards? do not expand
if ( !( bRegex || sphHasExpandableWildcards ( sFull ) ) )
return pNode;
// regex builds tree in batch mode
if ( bRegex )
{
tCtx.m_dRegexTerms.Add ( std::make_pair ( sFull, pNode ) );
return pNode;
}
bool bUseTermMerge = ( tCtx.m_bMergeSingles && pNode->m_dSpec.m_dZones.IsEmpty() );
ISphWordlist::Args_t tArgs ( bUseTermMerge, tCtx );
if ( !sphExpandGetWords ( sFull, tCtx, tArgs ) )
{
tCtx.m_pResult->m_sWarning.SetSprintf ( "Query word length is less than min %s length. word: '%s' ", ( tCtx.m_iMinInfixLen>0 ? "infix" : "prefix" ), sFull );
return pNode;
}
return ExpandXQNode ( tCtx, tArgs, pNode );
}
XQNode_t * ExpandXQNode ( const ExpansionContext_t & tCtx, ISphWordlist::Args_t & tArgs, XQNode_t * pNode )
{
// no real expansions?
// mark source word as expanded to prevent warning on terms mismatch in statistics
if ( !tArgs.m_dExpanded.GetLength() && !tArgs.m_pPayload )
{
tCtx.m_pResult->AddStat ( pNode->m_dWords.Begin()->m_sWord, 0, 0 );
pNode->m_dWords.Begin()->m_bExpanded = true;
return pNode;
}
// copy the original word (iirc it might get overwritten),
const XQKeyword_t tRootWord = pNode->m_dWords[0];
tCtx.m_pResult->AddStat ( tRootWord.m_sWord, tArgs.m_iTotalDocs, tArgs.m_iTotalHits );
// and build a binary tree of all the expansions
if ( tArgs.m_dExpanded.GetLength() )
{
BuildExpandedTree ( tRootWord, tArgs, pNode );
}
if ( tArgs.m_pPayload )
{
ISphSubstringPayload * pPayload = tArgs.m_pPayload.release();
tCtx.m_pPayloads->Add( pPayload );
if ( pNode->m_dWords.GetLength() )
{
// all expanded fit to single payload
pNode->m_dWords.Begin()->m_bExpanded = true;
pNode->m_dWords.Begin()->m_pPayload = pPayload;
} else
{
// payload added to expanded binary tree
assert ( pNode->GetOp()==SPH_QUERY_OR );
assert ( pNode->m_dChildren.GetLength() );
XQNode_t * pSubstringNode = new XQNode_t ( pNode->m_dSpec );
pSubstringNode->SetOp ( SPH_QUERY_OR );
XQKeyword_t tSubstringWord = tRootWord;
tSubstringWord.m_bExpanded = true;
tSubstringWord.m_pPayload = pPayload;
pSubstringNode->m_dWords.Add ( tSubstringWord );
pNode->m_dChildren.Add ( pSubstringNode );
pSubstringNode->m_pParent = pNode;
}
}
return pNode;
}
bool ExpandRegex ( ExpansionContext_t & tCtx, CSphString & sError )
{
if ( !tCtx.m_dRegexTerms.GetLength() )
return true;
if ( tCtx.m_dRegexTerms.GetLength() )
{
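// note: this seemingly redundant runtime check likely keeps the code below from being flagged as unreachable when built without RE2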
#if !WITH_RE2
sError.SetSprintf ( "REGEXP full-text operator used but no regexp support compiled" );
return false;
#endif
}
CSphFixedVector<std::unique_ptr < DictTerm2Expanded_i > > dConverters ( tCtx.m_dRegexTerms.GetLength() );
ISphWordlist::Args_t tArgs ( true, tCtx );
assert ( tCtx.m_pWordlist );
tCtx.m_pWordlist->ScanRegexWords ( tCtx.m_dRegexTerms, tArgs, dConverters );
assert ( !dConverters.GetLength() || ( tCtx.m_dRegexTerms.GetLength()==dConverters.GetLength() ) );
ARRAY_FOREACH ( i, dConverters )
{
if ( !dConverters[i] )
continue;
ISphWordlist::Args_t tArgs ( true, tCtx );
dConverters[i]->Convert ( tArgs );
ExpandXQNode ( tCtx, tArgs, tCtx.m_dRegexTerms[i].second );
}
return true;
}
bool sphHasExpandableWildcards ( const char * sWord )
{
const char * pCur = sWord;
int iWilds = 0;
for ( ; *pCur; pCur++ )
if ( sphIsWild ( *pCur ) )
iWilds++;
int iLen = int ( pCur - sWord );
return ( iWilds && iWilds<iLen );
}
bool sphExpandGetWords ( const char * sWord, const ExpansionContext_t & tCtx, ISphWordlist::Args_t & tWordlist )
{
// fix for the case '=*term' that should count as infix
if ( sWord[0]=='=' && sWord[1]=='*' )
sWord++;
if ( !sphIsWild ( *sWord ) || tCtx.m_iMinInfixLen==0 )
{
// do prefix expansion
// remove exact form modifier, if any
const char * sPrefix = sWord;
if ( *sPrefix=='=' )
sPrefix++;
// skip leading wildcards
// (in case we got here on non-infixed index path)
const char * sWildcard = sPrefix;
while ( sphIsWild ( *sPrefix ) )
{
sPrefix++;
sWildcard++;
}
// compute non-wildcard prefix length
int iPrefix = 0;
const char * sCodes = sPrefix;
for ( ; *sCodes && !sphIsWild ( *sCodes ); sCodes+=sphUtf8CharBytes ( *sCodes ) )
iPrefix++;
// do not expand prefixes under min length
if ( iPrefix<tCtx.m_iMinPrefixLen )
return false;
int iBytes = int ( sCodes - sPrefix );
// prefix expansion should work on nonstemmed words only
char sFixed[MAX_KEYWORD_BYTES];
if ( tCtx.m_bHasExactForms )
{
sFixed[0] = MAGIC_WORD_HEAD_NONSTEMMED;
memcpy ( sFixed+1, sPrefix, iBytes );
sPrefix = sFixed;
iBytes++;
}
tCtx.m_pWordlist->GetPrefixedWords ( sPrefix, iBytes, sWildcard, tWordlist );
} else
{
// do infix expansion
assert ( sphIsWild ( *sWord ) );
assert ( tCtx.m_iMinInfixLen>0 );
// find the longest substring of non-wildcards
int iCodepoints = 0;
int iInfixCodepoints = 0;
int iInfixBytes = 0;
const char * sMaxInfix = NULL;
const char * sInfix = sWord;
for ( const char * s = sWord; *s; )
{
int iCodeLen = sphUtf8CharBytes ( *s );
if ( sphIsWild ( *s ) )
{
sInfix = s + 1;
iCodepoints = 0;
} else
{
iCodepoints++;
if ( s - sInfix + iCodeLen > iInfixBytes )
{
sMaxInfix = sInfix;
iInfixBytes = int ( s - sInfix ) + iCodeLen;
iInfixCodepoints = iCodepoints;
}
}
s += iCodeLen;
}
// do not expand infixes under min_infix_len
if ( iInfixCodepoints < tCtx.m_iMinInfixLen )
return false;
// ignore the leading star
tCtx.m_pWordlist->GetInfixedWords ( sMaxInfix, iInfixBytes, sWord, tWordlist );
}
return true;
}
XQNode_t * CSphIndex_VLN::ExpandPrefix ( XQNode_t * pNode, CSphQueryResultMeta & tMeta, CSphScopedPayload * pPayloads, DWORD uQueryDebugFlags, int iQueryExpansionLimit ) const
{
if ( !pNode || !m_pDict->GetSettings().m_bWordDict
|| ( m_tSettings.GetMinPrefixLen ( m_pDict->GetSettings().m_bWordDict )<=0 && m_tSettings.m_iMinInfixLen<=0 ) )
return pNode;
assert ( m_bPassedAlloc );
assert ( !m_tWordlist.m_tBuf.IsEmpty() );
ExpansionContext_t tCtx;
tCtx.m_pWordlist = &m_tWordlist;
tCtx.m_pResult = &tMeta;
tCtx.m_iMinPrefixLen = m_tSettings.GetMinPrefixLen ( m_pDict->GetSettings().m_bWordDict );
tCtx.m_iMinInfixLen = m_tSettings.m_iMinInfixLen;
tCtx.m_iExpansionLimit = GetExpansionLimit ( iQueryExpansionLimit, m_iExpansionLimit );
tCtx.m_bHasExactForms = ( m_pDict->HasMorphology() || m_tSettings.m_bIndexExactWords );
tCtx.m_bMergeSingles = ( uQueryDebugFlags & QUERY_DEBUG_NO_PAYLOAD )==0;
tCtx.m_pPayloads = pPayloads;
tCtx.m_eHitless = m_tSettings.m_eHitless;
pNode = sphExpandXQNode ( pNode, tCtx );
if ( !ExpandRegex ( tCtx, tMeta.m_sError ) )
return nullptr;
pNode->Check ( true );
tCtx.AggregateStats();
return pNode;
}
// transform the (A B) NEAR C into A NEAR B NEAR C
static void TransformNear ( XQNode_t ** ppNode )
{
XQNode_t *& pNode = *ppNode;
if ( pNode->GetOp()==SPH_QUERY_NEAR )
{
assert ( pNode->m_dWords.GetLength()==0 );
CSphVector<XQNode_t*> dArgs;
int iStartFrom;
// transform all (A B C) NEAR D into A NEAR B NEAR C NEAR D
do
{
dArgs.Reset();
iStartFrom = 0;
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
XQNode_t * pChild = pNode->m_dChildren[i]; ///< shortcut
if ( pChild->GetOp()==SPH_QUERY_AND && pChild->m_dChildren.GetLength()>0 )
{
ARRAY_FOREACH ( j, pChild->m_dChildren )
{
if ( j==0 && iStartFrom==0 )
{
// we will remove the node anyway, so just replace it with its 1st child instead
pNode->m_dChildren[i] = pChild->m_dChildren[j];
pNode->m_dChildren[i]->m_pParent = pNode;
iStartFrom = i+1;
} else
{
dArgs.Add ( pChild->m_dChildren[j] );
}
}
pChild->m_dChildren.Reset();
SafeDelete ( pChild );
} else if ( iStartFrom!=0 )
{
dArgs.Add ( pChild );
}
}
if ( iStartFrom!=0 )
{
pNode->m_dChildren.Resize ( iStartFrom + dArgs.GetLength() );
ARRAY_FOREACH ( i, dArgs )
{
pNode->m_dChildren [ i + iStartFrom ] = dArgs[i];
pNode->m_dChildren [ i + iStartFrom ]->m_pParent = pNode;
}
}
} while ( iStartFrom!=0 );
}
ARRAY_FOREACH ( i, pNode->m_dChildren )
TransformNear ( &pNode->m_dChildren[i] );
}
/// tag excluded keywords (rvals to operator NOT)
static void TagExcluded ( XQNode_t * pNode, bool bNot )
{
if ( pNode->GetOp()==SPH_QUERY_ANDNOT )
{
assert ( pNode->m_dChildren.GetLength()==2 );
assert ( pNode->m_dWords.GetLength()==0 );
TagExcluded ( pNode->m_dChildren[0], bNot );
TagExcluded ( pNode->m_dChildren[1], !bNot );
} else if ( pNode->m_dChildren.GetLength() )
{
// FIXME? check if this works okay with "virtually plain" stuff?
ARRAY_FOREACH ( i, pNode->m_dChildren )
TagExcluded ( pNode->m_dChildren[i], bNot );
} else
{
// tricky bit
// no assert on length here and that is intended
// we have fully empty nodes (0 children, 0 words) sometimes!
ARRAY_FOREACH ( i, pNode->m_dWords )
pNode->m_dWords[i].m_bExcluded = bNot;
}
}
/// optimize phrase queries if we have bigrams
static void TransformBigrams ( XQNode_t * pNode, const CSphIndexSettings & tSettings )
{
assert ( tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE );
assert ( tSettings.m_eBigramIndex==SPH_BIGRAM_ALL || tSettings.m_dBigramWords.GetLength() );
if ( pNode->GetOp()!=SPH_QUERY_PHRASE )
{
ARRAY_FOREACH ( i, pNode->m_dChildren )
TransformBigrams ( pNode->m_dChildren[i], tSettings );
return;
}
CSphBitvec bmRemove;
bmRemove.Init ( pNode->m_dWords.GetLength() );
for ( int i=0; i<pNode->m_dWords.GetLength()-1; i++ )
{
// check whether this pair was indexed
bool bBigram = false;
switch ( tSettings.m_eBigramIndex )
{
case SPH_BIGRAM_NONE:
break;
case SPH_BIGRAM_ALL:
bBigram = true;
break;
case SPH_BIGRAM_FIRSTFREQ:
bBigram = tSettings.m_dBigramWords.BinarySearch ( pNode->m_dWords[i].m_sWord )!=NULL;
break;
case SPH_BIGRAM_BOTHFREQ:
bBigram =
( tSettings.m_dBigramWords.BinarySearch ( pNode->m_dWords[i].m_sWord )!=NULL ) &&
( tSettings.m_dBigramWords.BinarySearch ( pNode->m_dWords[i+1].m_sWord )!=NULL );
break;
}
if ( !bBigram )
continue;
// replace the pair with a bigram keyword
// FIXME!!! set phrase weight for this "word" here
pNode->m_dWords[i].m_sWord.SetSprintf ( "%s%c%s",
pNode->m_dWords[i].m_sWord.cstr(),
MAGIC_WORD_BIGRAM,
pNode->m_dWords[i+1].m_sWord.cstr() );
// only mark for removal now, we will sweep later
// so that [a b c] would convert to ["a b" "b c"], not just ["a b" c]
bmRemove.BitClear ( i );
bmRemove.BitSet ( i+1 );
}
// remove marked words
int iOut = 0;
ARRAY_FOREACH ( i, pNode->m_dWords )
if ( !bmRemove.BitGet(i) )
pNode->m_dWords[iOut++] = pNode->m_dWords[i];
pNode->m_dWords.Resize ( iOut );
// fixup nodes that are not real phrases any more
if ( pNode->m_dWords.GetLength()==1 )
pNode->SetOp ( SPH_QUERY_AND );
}
void sphTransformExtendedQuery ( XQNode_t ** ppNode, const CSphIndexSettings & tSettings, bool bHasBooleanOptimization, const ISphKeywordsStat * pKeywords )
{
TransformQuorum ( ppNode );
( *ppNode )->Check ( true );
TransformNear ( ppNode );
( *ppNode )->Check ( true );
if ( tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE )
TransformBigrams ( *ppNode, tSettings );
TagExcluded ( *ppNode, false );
( *ppNode )->Check ( true );
// boolean optimization
if ( bHasBooleanOptimization )
sphOptimizeBoolean ( ppNode, pKeywords );
}
static void SetupSplitFilter ( CSphFilterSettings & tFilter, int iPart, int iTotal )
{
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_sAttrName = "@rowid";
tFilter.m_iMinValue = iPart;
tFilter.m_iMaxValue = iTotal;
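// note: min/max here carry the part index and the total part count rather than actual rowids;
// the real per-part rowid range is derived from them later (cf. GetFilterRowIdBoundaries)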
}
// basically the same code as QueryDiskChunks in an RT index
template<typename RUN>
static bool RunSplitQuery ( RUN && tRun, const CSphQuery & tQuery, CSphQueryResultMeta & tResult, VecTraits_T<ISphMatchSorter *> & dSorters, const CSphMultiQueryArgs & tArgs, QueryProfile_c * pProfiler, const SmallStringHash_T<int64_t> * pLocalDocs, int64_t iTotalDocs, const char * szIndexName, int iSplit, int64_t tmMaxTimer )
{
assert ( !dSorters.IsEmpty () );
// counter of tasks we will issue now
int iJobs = iSplit;
assert ( iJobs>=1 );
// pseudo-sharding scheduler
auto tDispatch = GetEffectivePseudoShardingDispatcherTemplate();
Dispatcher::Unify ( tDispatch, tQuery.m_tPseudoShardingDispatcher );
// the context
Threads::ClonableCtx_T<DiskChunkSearcherCtx_t, DiskChunkSearcherCloneCtx_t, Threads::ECONTEXT::ORDERED> tClonableCtx { dSorters, tResult };
auto pDispatcher = Dispatcher::Make ( iJobs, 0, tDispatch, tClonableCtx.IsSingle() );
tClonableCtx.LimitConcurrency ( pDispatcher->GetConcurrency() );
auto iStart = sphMicroTimer();
QUERYINFO << " Started: " << ( sphMicroTimer()-iStart ) << " index:" << szIndexName;
// because disk chunk search within the loop will switch the profiler state
SwitchProfile ( pProfiler, SPH_QSTATE_INIT );
if ( pProfiler )
pProfiler->m_iPseudoShards = iSplit;
std::atomic<bool> bInterrupt {false};
auto CheckInterrupt = [&bInterrupt]() { return bInterrupt.load ( std::memory_order_relaxed ); };
int iConcurrency = tClonableCtx.Concurrency(iJobs);
Threads::Coro::ExecuteN ( iConcurrency, [&]
{
auto pSource = pDispatcher->MakeSource();
int iJob = -1; // make it consumed
if ( !pSource->FetchTask ( iJob ) || CheckInterrupt() )
{
QUERYINFO << "Early finish parallel RunSplitQuery because of empty queue" << " index:" << szIndexName;
return; // already nothing to do, early finish.
}
auto tJobContext = tClonableCtx.CloneNewContext ( !iJob );
auto& tCtx = tJobContext.first;
auto Interrupt = [&bInterrupt, &tCtx, &szIndexName] ( const char* szReason ) {
tCtx.m_tMeta.m_sWarning = szReason;
bInterrupt.store ( true, std::memory_order_relaxed );
QUERYINFO << "RunSplitQuery interrupted " << szReason << " index:" << szIndexName;
};
QUERYINFO << "RunSplitQuery cloned context " << tJobContext.second << " index:" << szIndexName;
tClonableCtx.SetJobOrder ( tJobContext.second, iJob );
Threads::Coro::SetThrottlingPeriodMS ( session::GetThrottlingPeriodMS() );
while ( !CheckInterrupt() ) // some earlier job met error; abort.
{
QUERYINFO << "RunSplitQuery " << tJobContext.second << ", job " << iJob << " index:" << szIndexName;
myinfo::SetTaskInfo ( "%d ch %d:", Threads::Coro::NumOfRestarts(), iJob );
auto & dLocalSorters = tCtx.m_dSorters;
CSphQueryResultMeta tChunkMeta;
CSphQueryResult tChunkResult;
tChunkResult.m_pMeta = &tChunkMeta;
CSphQueryResultMeta & tThMeta = tCtx.m_tMeta;
tChunkMeta.m_pProfile = tThMeta.m_pProfile;
CSphMultiQueryArgs tMultiArgs ( tArgs.m_iIndexWeight );
tMultiArgs.m_iTag = tArgs.m_bModifySorterSchemas ? iJob+1 : tArgs.m_iTag;
tMultiArgs.m_uPackedFactorFlags = tArgs.m_uPackedFactorFlags;
tMultiArgs.m_pLocalDocs = pLocalDocs;
tMultiArgs.m_iTotalDocs = iTotalDocs;
tMultiArgs.m_bModifySorterSchemas = false;
tMultiArgs.m_iTotalThreads = iConcurrency;
CSphQuery tQueryWithExtraFilter = tQuery;
SetupSplitFilter ( tQueryWithExtraFilter.m_dFilters.Add(), iJob, iJobs );
bool bRes = tRun ( tChunkResult, tQueryWithExtraFilter, dLocalSorters, tMultiArgs );
bInterrupt.store ( !bRes, std::memory_order_relaxed );
if ( !iJob )
tThMeta.MergeWordStats ( tChunkMeta );
tThMeta.m_bHasPrediction |= tChunkMeta.m_bHasPrediction;
if ( tThMeta.m_bHasPrediction )
tThMeta.m_tStats.Add ( tChunkMeta.m_tStats );
if ( iJob < iJobs-1 && sph::TimeExceeded ( tmMaxTimer ) )
Interrupt ( "query time exceeded max_query_time" );
if ( tThMeta.m_sWarning.IsEmpty() && !tChunkMeta.m_sWarning.IsEmpty() )
tThMeta.m_sWarning = tChunkMeta.m_sWarning;
tThMeta.m_bTotalMatchesApprox |= tChunkMeta.m_bTotalMatchesApprox;
tThMeta.m_tIteratorStats.Merge ( tChunkMeta.m_tIteratorStats );
if ( CheckInterrupt() && !tChunkMeta.m_sError.IsEmpty() )
// FIXME? maybe handle this more gracefully (convert to a warning)?
tThMeta.m_sError = tChunkMeta.m_sError;
iJob = -1; // mark it consumed
if ( !pSource->FetchTask ( iJob ) || CheckInterrupt() )
return; // all is done
// yield and reschedule every quantum of time; this gives other tasks a chance to run
if ( Threads::Coro::RuntimeExceeded() )
{
if ( session::GetKilled() )
Interrupt ( "query was killed" );
else
Threads::Coro::RescheduleAndKeepCrashQuery();
}
}
});
QUERYINFO << "RunSplitQuery processed in " << tClonableCtx.NumWorked() << " thread(s)" << " index:" << szIndexName;
tClonableCtx.Finalize();
// we can not fail the query due to an interruption or a timeout;
// that is still a valid result set, just with a warning
// parent sorters merge fine in case of interruption or timeout
return ( tResult.m_sError.IsEmpty() );
}
template<typename RUN>
bool CSphIndex_VLN::SplitQuery ( RUN && tRun, CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dAllSorters, const CSphMultiQueryArgs & tArgs, int64_t tmMaxTimer ) const
{
auto & tMeta = *tResult.m_pMeta;
QueryProfile_c * pProfile = tMeta.m_pProfile;
CSphVector<ISphMatchSorter*> dSorters;
dSorters.Reserve ( dAllSorters.GetLength() );
dAllSorters.Apply ([&dSorters] ( ISphMatchSorter* p ) { if ( p ) dSorters.Add(p); });
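// never spawn more jobs than there are documents, but always at least one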
int iSplit = Max ( Min ( (int)m_tStats.m_iTotalDocuments, tArgs.m_iThreads ), 1 );
int64_t iTotalDocs = tArgs.m_iTotalDocs ? tArgs.m_iTotalDocs : m_tStats.m_iTotalDocuments;
bool bOk = RunSplitQuery ( tRun, tQuery, *tResult.m_pMeta, dSorters, tArgs, pProfile, tArgs.m_pLocalDocs, iTotalDocs, GetName(), iSplit, tmMaxTimer );
tResult.m_pBlobPool = m_tBlobAttrs.GetReadPtr();
tResult.m_pDocstore = m_pDocstore ? this : nullptr;
tResult.m_pColumnar = m_pColumnar.get();
PooledAttrsToPtrAttrs ( dSorters, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), tArgs.m_bFinalizeSorters, pProfile, tArgs.m_bModifySorterSchemas );
return bOk;
}
bool CSphIndex_VLN::RunParsedMultiQuery ( int iStackNeed, DictRefPtr_c & pDict, bool bCloneDict, const CSphQuery & tQuery, CSphQueryResult & tResult, VecTraits_T<ISphMatchSorter*> & dSorters, const XQQuery_t & tParsed, const CSphMultiQueryArgs & tArgs, int64_t tmMaxTimer ) const
{
return Threads::Coro::ContinueBool ( iStackNeed, [&] {
// we could reuse tParsed as-is, but marking common subtrees modifies the tree
// so for now clone the tree before using
const XQQuery_t * pTree = &tParsed;
std::unique_ptr<XQQuery_t> pClonedTree;
if ( bCloneDict )
{
pClonedTree = std::unique_ptr<XQQuery_t> ( CloneXQQuery(tParsed) );
pTree = pClonedTree.get();
}
// flag common subtrees
int iCommonSubtrees = 0;
if ( m_iMaxCachedDocs && m_iMaxCachedHits )
iCommonSubtrees = sphMarkCommonSubtrees ( 1, pTree );
CSphQueryNodeCache tNodeCache ( iCommonSubtrees, m_iMaxCachedDocs, m_iMaxCachedHits );
DictRefPtr_c pClonedDict;
if ( bCloneDict )
{
pClonedDict = GetStatelessDict ( m_pDict );
SetupStarDict ( pClonedDict );
SetupExactDict ( pClonedDict );
}
bool bResult = ParsedMultiQuery ( tQuery, tResult, dSorters, *pTree, bCloneDict ? std::move (pClonedDict) : std::move (pDict), tArgs, &tNodeCache, tmMaxTimer );
PooledAttrsToPtrAttrs ( dSorters, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), tArgs.m_bFinalizeSorters, tResult.m_pMeta->m_pProfile, tArgs.m_bModifySorterSchemas );
return bResult;
});
}
/// one regular query vs many sorters (like facets, or similar for common-tree optimization)
bool CSphIndex_VLN::MultiQuery ( CSphQueryResult & tResult, const CSphQuery & tQuery, const VecTraits_T<ISphMatchSorter *> & dAllSorters, const CSphMultiQueryArgs & tArgs ) const
{
auto & tMeta = *tResult.m_pMeta;
QueryProfile_c * pProfile = tMeta.m_pProfile;
int64_t tmMaxTimer = 0;
std::unique_ptr<MiniTimer_c> pTimerGuard;
if ( tQuery.m_uMaxQueryMsec> 0 )
{
pTimerGuard = std::make_unique<MiniTimer_c>();
tmMaxTimer = pTimerGuard->Engage ( tQuery.m_uMaxQueryMsec ); // max_query_time
}
const QueryParser_i * pQueryParser = tQuery.m_pQueryParser;
assert ( pQueryParser );
MEMORY ( MEM_DISK_QUERY );
// to avoid checking every ppSorters element for NULL on each subsequent step, just filter out all the nulls right here
CSphVector<ISphMatchSorter*> dSorters;
dSorters.Reserve ( dAllSorters.GetLength() );
dAllSorters.Apply ([&dSorters] ( ISphMatchSorter* p) { if ( p ) dSorters.Add(p); });
// bail out if we have nothing to work with
if ( dSorters.IsEmpty () )
return false;
// non-random at the start, random at the end
if ( dSorters.any_of ( []( ISphMatchSorter * p) { return p->IsRandom(); } ) )
dSorters.Sort ( CmpPSortersByRandom_fn() );
// fast path for scans
if ( pQueryParser->IsFullscan ( tQuery ) )
{
if ( tArgs.m_iThreads>1 )
{
return SplitQuery (
[this, &tmMaxTimer]
( CSphQueryResult & tChunkResult, const CSphQuery & tQuery, VecTraits_T<ISphMatchSorter *> dLocalSorters, const CSphMultiQueryArgs & tMultiArgs )
{ return MultiScan ( tChunkResult, tQuery, dLocalSorters, tMultiArgs, tmMaxTimer ); },
tResult, tQuery, dAllSorters, tArgs, tmMaxTimer );
} else
{
return MultiScan ( tResult, tQuery, dSorters, tArgs, tmMaxTimer );
}
}
SwitchProfile ( pProfile, SPH_QSTATE_DICT_SETUP );
DictRefPtr_c pDict = GetStatelessDict ( m_pDict );
SetupStarDict ( pDict );
SetupExactDict ( pDict );
CSphVector<BYTE> dFiltered;
const BYTE * sModifiedQuery = (const BYTE *)tQuery.m_sQuery.cstr();
FieldFilterOptions_t tFFOptions { tQuery.m_eJiebaMode };
if ( m_pFieldFilter && sModifiedQuery && m_pFieldFilter->Clone ( &tFFOptions )->Apply ( sModifiedQuery, dFiltered, true ) )
sModifiedQuery = dFiltered.Begin();
// parse query
SwitchProfile ( pProfile, SPH_QSTATE_PARSE );
assert ( m_pQueryTokenizer.Ptr() && m_pQueryTokenizerJson.Ptr() );
XQQuery_t tParsed;
if ( !pQueryParser->ParseQuery ( tParsed, (const char*)sModifiedQuery, &tQuery, m_pQueryTokenizer, m_pQueryTokenizerJson, &m_tSchema, pDict, m_tSettings, &m_tMorphFields ) )
{
// FIXME? might wanna reset profile to unknown state
tMeta.m_sError = tParsed.m_sParseError;
return false;
}
// check again for fullscan
if ( pQueryParser->IsFullscan ( tParsed ) )
{
if ( tArgs.m_iThreads>1 )
{
return SplitQuery (
[this, &tmMaxTimer]
( CSphQueryResult & tChunkResult, const CSphQuery & tQuery, VecTraits_T<ISphMatchSorter *> dLocalSorters, const CSphMultiQueryArgs & tMultiArgs )
{ return MultiScan ( tChunkResult, tQuery, dLocalSorters, tMultiArgs, tmMaxTimer ); },
tResult, tQuery, dAllSorters, tArgs, tmMaxTimer );
} else
{
return MultiScan ( tResult, tQuery, dSorters, tArgs, tmMaxTimer );
}
}
if ( !tParsed.m_sParseWarning.IsEmpty() )
tMeta.m_sWarning = tParsed.m_sParseWarning;
// transform query if needed (quorum transform, etc.)
SwitchProfile ( pProfile, SPH_QSTATE_TRANSFORMS );
sphTransformExtendedQuery ( &tParsed.m_pRoot, m_tSettings, tQuery.m_bSimplify, this );
bool bWordDict = pDict->GetSettings().m_bWordDict;
int iExpandKeywords = ExpandKeywords ( m_tMutableSettings.m_iExpandKeywords, tQuery.m_eExpandKeywords, m_tSettings, bWordDict );
if ( iExpandKeywords!=KWE_DISABLED )
{
sphQueryExpandKeywords ( &tParsed.m_pRoot, m_tSettings, iExpandKeywords, bWordDict );
tParsed.m_pRoot->Check ( true );
}
// this should be after keyword expansion
TransformAotFilter ( tParsed.m_pRoot, pDict->GetWordforms(), m_tSettings );
// expanding prefix in word dictionary case
CSphScopedPayload tPayloads;
XQNode_t * pPrefixed = ExpandPrefix ( tParsed.m_pRoot, tMeta, &tPayloads, tQuery.m_uDebugFlags, tQuery.m_iExpansionLimit );
if ( !pPrefixed )
return false;
tParsed.m_pRoot = pPrefixed;
tParsed.m_bNeedSZlist = tQuery.m_bZSlist;
int iStackNeed = ConsiderStack ( tParsed.m_pRoot, tMeta.m_sError );
if ( !iStackNeed )
return false;
if ( tArgs.m_iThreads>1 )
{
return SplitQuery (
[this, iStackNeed, &pDict, &tParsed, &tmMaxTimer]
( CSphQueryResult & tChunkResult, const CSphQuery & tQuery, VecTraits_T<ISphMatchSorter *> dLocalSorters, const CSphMultiQueryArgs & tMultiArgs )
{ return RunParsedMultiQuery ( iStackNeed, pDict, true, tQuery, tChunkResult, dLocalSorters, tParsed, tMultiArgs, tmMaxTimer ); },
tResult, tQuery, dAllSorters, tArgs, tmMaxTimer );
} else
{
return RunParsedMultiQuery ( iStackNeed, pDict, false, tQuery, tResult, dSorters, tParsed, tArgs, tmMaxTimer );
}
}
/// many regular queries with one sorter attached to each query.
/// returns true if at least one query succeeded; failed queries are indicated by pResult->m_iMultiplier==-1
bool CSphIndex_VLN::MultiQueryEx ( int iQueries, const CSphQuery * pQueries, CSphQueryResult * pResults, ISphMatchSorter ** ppSorters, const CSphMultiQueryArgs & tArgs ) const
{
// ensure we have multiple queries
if ( iQueries==1 )
return MultiQuery ( pResults[0], pQueries[0], { ppSorters, 1}, tArgs );
MEMORY ( MEM_DISK_QUERYEX );
assert ( ppSorters );
DictRefPtr_c pDict = GetStatelessDict ( m_pDict );
SetupStarDict ( pDict );
SetupExactDict ( pDict );
CSphFixedVector<XQQuery_t> dXQ ( iQueries );
CSphScopedPayload tPayloads;
bool bResult = false;
bool bResultScan = false;
int iStackNeed = 0;
bool bWordDict = pDict->GetSettings().m_bWordDict;
for ( int i=0; i<iQueries; ++i )
{
const auto & tCurQuery = pQueries[i];
auto & tCurResult = pResults[i];
auto & tMeta = *tCurResult.m_pMeta;
// nothing to do without a sorter
if ( !ppSorters[i] )
{
tMeta.m_iMultiplier = -1; ///< show that this particular query failed
continue;
}
// fast path for scans
if ( tCurQuery.m_sQuery.IsEmpty() )
{
MiniTimer_c tTimerGuard;
int64_t tmMaxTimer = tTimerGuard.Engage ( tCurQuery.m_uMaxQueryMsec ); // max_query_time
if ( MultiScan ( tCurResult, tCurQuery, { ppSorters+i, 1 }, tArgs, tmMaxTimer ) )
bResultScan = true;
else
tMeta.m_iMultiplier = -1; ///< show that this particular query failed
continue;
}
tMeta.m_tIOStats.Start();
// parse query
const QueryParser_i * pQueryParser = tCurQuery.m_pQueryParser;
assert ( pQueryParser );
if ( pQueryParser->ParseQuery ( dXQ[i], tCurQuery.m_sQuery.cstr(), &tCurQuery, m_pQueryTokenizer, m_pQueryTokenizerJson, &m_tSchema, pDict, m_tSettings, &m_tMorphFields ) )
{
// transform query if needed (quorum transform, keyword expansion, etc.)
sphTransformExtendedQuery ( &dXQ[i].m_pRoot, m_tSettings, tCurQuery.m_bSimplify, this );
int iExpandKeywords = ExpandKeywords ( m_tMutableSettings.m_iExpandKeywords, tCurQuery.m_eExpandKeywords, m_tSettings, bWordDict );
if ( iExpandKeywords!=KWE_DISABLED )
{
sphQueryExpandKeywords ( &dXQ[i].m_pRoot, m_tSettings, iExpandKeywords, bWordDict );
dXQ[i].m_pRoot->Check ( true );
}
// this should be after keyword expansion
TransformAotFilter ( dXQ[i].m_pRoot, pDict->GetWordforms(), m_tSettings );
// expanding prefix in word dictionary case
XQNode_t * pPrefixed = ExpandPrefix ( dXQ[i].m_pRoot, tMeta, &tPayloads, tCurQuery.m_uDebugFlags, tCurQuery.m_iExpansionLimit );
if ( pPrefixed )
{
dXQ[i].m_pRoot = pPrefixed;
iStackNeed = ConsiderStack ( dXQ[i].m_pRoot, tMeta.m_sError );
if ( iStackNeed!=0 )
{
bResult = true;
} else
{
tMeta.m_iMultiplier = -1;
SafeDelete ( dXQ[i].m_pRoot );
}
} else
{
tMeta.m_iMultiplier = -1;
SafeDelete ( dXQ[i].m_pRoot );
}
} else
{
tMeta.m_sError = dXQ[i].m_sParseError;
tMeta.m_iMultiplier = -1;
}
if ( !dXQ[i].m_sParseWarning.IsEmpty() )
tMeta.m_sWarning = dXQ[i].m_sParseWarning;
tMeta.m_tIOStats.Stop();
}
// continue only if we have at least one non-failed
if ( bResult )
Threads::Coro::Continue ( iStackNeed, [&]
{
int iCommonSubtrees = 0;
if ( m_iMaxCachedDocs && m_iMaxCachedHits )
iCommonSubtrees = sphMarkCommonSubtrees ( iQueries, &dXQ[0] );
CSphQueryNodeCache tNodeCache ( iCommonSubtrees, m_iMaxCachedDocs, m_iMaxCachedHits );
bResult = false;
for ( int j=0; j<iQueries; ++j )
{
const auto & tCurQuery = pQueries[j];
auto & tCurResult = pResults[j];
auto & tMeta = *tCurResult.m_pMeta;
// fullscan case
if ( tCurQuery.m_sQuery.IsEmpty() )
continue;
tMeta.m_tIOStats.Start();
MiniTimer_c tTimerGuard;
int64_t tmMaxTimer = tTimerGuard.Engage ( tCurQuery.m_uMaxQueryMsec ); // max_query_time
if ( dXQ[j].m_pRoot && ppSorters[j] && ParsedMultiQuery ( tCurQuery, tCurResult, { ppSorters+j, 1 }, dXQ[j], pDict, tArgs, &tNodeCache, tmMaxTimer ) )
{
bResult = true;
tMeta.m_iMultiplier = iCommonSubtrees ? iQueries : 1;
} else
tMeta.m_iMultiplier = -1;
tMeta.m_tIOStats.Stop();
}
});
PooledAttrsToPtrAttrs ( { ppSorters, iQueries }, m_tBlobAttrs.GetReadPtr(), m_pColumnar.get(), tArgs.m_bFinalizeSorters, pResults[0].m_pMeta->m_pProfile, tArgs.m_bModifySorterSchemas );
return bResult || bResultScan;
}
bool CSphIndex_VLN::CheckEarlyReject ( const CSphVector<CSphFilterSettings> & dFilters, const ISphFilter * pFilter, ESphCollation eCollation, const ISphSchema & tSchema ) const
{
if ( !pFilter || dFilters.IsEmpty() )
return false;
bool bHaveColumnar = false;
if ( m_pColumnar )
{
// check .SPC file minmax
CSphString sWarning;
int iNonColumnar = 0;
std::vector<common::Filter_t> dColumnarFilters;
ARRAY_FOREACH ( i, dFilters )
{
const CSphColumnInfo * pCol = m_tSchema.GetAttr ( dFilters[i].m_sAttrName.cstr() );
if ( !pCol )
continue;
if ( pCol->IsColumnar() || pCol->IsColumnarExpr() )
AddColumnarFilter ( dColumnarFilters, dFilters[i], eCollation, tSchema, sWarning );
else
iNonColumnar++;
}
bHaveColumnar = !dColumnarFilters.empty();
// can't process mixed plain and columnar attributes
if ( iNonColumnar )
return false;
if ( m_pColumnar->EarlyReject ( dColumnarFilters, *pFilter ) )
return true;
}
if ( bHaveColumnar )
return false;
if ( m_iDocinfoIndex )
{
// check .SPA file minmax
DWORD uStride = m_tSchema.GetRowSize();
DWORD * pMinEntry = const_cast<DWORD*> ( &m_pDocinfoIndex [ m_iDocinfoIndex*uStride*2 ] );
DWORD * pMaxEntry = pMinEntry + uStride;
if ( !pFilter->EvalBlock ( pMinEntry, pMaxEntry ) )
return true;
}
return false;
}
static void SetupRowIdBoundaries ( const CSphVector<CSphFilterSettings> & dFilters, RowID_t uTotalDocs, ISphRanker & tRanker )
{
const CSphFilterSettings * pRowIdFilter = nullptr;
for ( auto & i : dFilters )
if ( i.m_sAttrName=="@rowid" )
{
pRowIdFilter = &i;
break;
}
if ( !pRowIdFilter )
return;
RowIdBoundaries_t tBoundaries = GetFilterRowIdBoundaries ( *pRowIdFilter, uTotalDocs );
tRanker.ExtraData ( EXTRA_SET_BOUNDARIES, (void**)&tBoundaries );
}
bool CSphIndex_VLN::ParsedMultiQuery ( const CSphQuery & tQuery, CSphQueryResult & tResult, const VecTraits_T<ISphMatchSorter *> & dSorters, const XQQuery_t & tXQ, DictRefPtr_c pDict, const CSphMultiQueryArgs & tArgs, CSphQueryNodeCache * pNodeCache, int64_t tmMaxTimer ) const
{
assert ( !tQuery.m_sQuery.IsEmpty() && tQuery.m_eMode!=SPH_MATCH_FULLSCAN ); // scans must go through MultiScan()
assert ( tArgs.m_iTag>=0 );
auto& tMeta = *tResult.m_pMeta;
// start counting
int64_t tmQueryStart = sphMicroTimer();
auto tmCpuQueryStart = sphTaskCpuTimer ();
QueryProfile_c * pProfile = tMeta.m_pProfile;
ESphQueryState eOldState = SPH_QSTATE_UNKNOWN;
if ( pProfile )
eOldState = pProfile->Switch ( SPH_QSTATE_INIT );
std::optional<ScopedLowPriority_c> tPrio;
if ( tQuery.m_bLowPriority )
tPrio.emplace();
///////////////////
// setup searching
///////////////////
// non-ready index, empty response!
if ( !m_bPassedAlloc )
{
tMeta.m_sError = "table not preread";
return false;
}
CSphQueryContext tCtx(tQuery);
CreateFilterContext_t tFlx;
const ISphSchema * pMaxSorterSchema = nullptr;
CSphVector<CSphFilterSettings> dTransformedFilters; // holds filter settings if they were modified. filters hold pointers to those settings
CSphVector<FilterTreeItem_t> dTransformedFilterTree;
std::unique_ptr<ISphSchema> pModifiedMatchSchema; // may contain same schema but with modified eval stages
if ( !SetupFiltersAndContext ( tCtx, tFlx, tMeta, pMaxSorterSchema, dTransformedFilters, dTransformedFilterTree, pModifiedMatchSchema, dSorters, tArgs ) )
return false;
assert(pMaxSorterSchema);
const ISphSchema & tMaxSorterSchema = *pMaxSorterSchema;
// set blob pool for string on_sort expression fix up
tCtx.SetBlobPool ( m_tBlobAttrs.GetReadPtr() );
tCtx.m_uPackedFactorFlags = tArgs.m_uPackedFactorFlags;
// open files
DataReaderFactoryPtr_c pDoclist = m_pDoclistFile;
DataReaderFactoryPtr_c pHitlist = m_pHitlistFile;
if ( !pDoclist || !pHitlist )
SwitchProfile ( pProfile, SPH_QSTATE_OPEN );
if ( !pDoclist )
{
pDoclist = NewProxyReader ( GetFilename ( SPH_EXT_SPD ), tMeta.m_sError, DataReaderFactory_c::DOCS, m_tMutableSettings.m_tFileAccess.m_iReadBufferDocList, FileAccess_e::FILE );
if ( !pDoclist )
return false;
}
if ( !pHitlist )
{
pHitlist = NewProxyReader ( GetFilename ( SPH_EXT_SPP ), tMeta.m_sError, DataReaderFactory_c::HITS, m_tMutableSettings.m_tFileAccess.m_iReadBufferHitList, FileAccess_e::FILE );
if ( !pHitlist )
return false;
}
pDoclist->SetProfile ( pProfile );
pHitlist->SetProfile ( pProfile );
SwitchProfile ( pProfile, SPH_QSTATE_INIT );
// setup search terms
DiskIndexQwordSetup_c tTermSetup ( pDoclist, pHitlist, m_tSkiplists.GetReadPtr(), m_tSettings.m_iSkiplistBlockSize, true, RowID_t(m_iDocinfo) );
tTermSetup.SetDict ( std::move ( pDict ) );
tTermSetup.m_pIndex = this;
tTermSetup.m_iDynamicRowitems = tMaxSorterSchema.GetDynamicSize();
tTermSetup.m_iMaxTimer = tmMaxTimer;
tTermSetup.m_pWarning = &tMeta.m_sWarning;
tTermSetup.m_pCtx = &tCtx;
tTermSetup.m_pNodeCache = pNodeCache;
tTermSetup.m_bHasWideFields = ( m_tSchema.GetFieldsCount()>32 );
tMeta.m_bBigram = ( m_tSettings.m_eBigramIndex!=SPH_BIGRAM_NONE );
// setup prediction constrain
CSphQueryStats tQueryStats;
bool bCollectPredictionCounters = ( tQuery.m_iMaxPredictedMsec>0 );
int64_t iNanoBudget = (int64_t)( tQuery.m_iMaxPredictedMsec) * 1000000; // from milliseconds to nanoseconds
tQueryStats.m_pNanoBudget = &iNanoBudget;
if ( bCollectPredictionCounters )
tTermSetup.m_pStats = &tQueryStats;
// bind weights
tCtx.BindWeights ( tQuery, m_tSchema, tMeta.m_sWarning );
// setup query
// must happen before index-level reject, in order to build proper keyword stats
std::unique_ptr<ISphRanker> pRanker = sphCreateRanker ( tXQ, tQuery, tMeta, tTermSetup, tCtx, tMaxSorterSchema );
if ( !pRanker )
return false;
if ( ( tArgs.m_uPackedFactorFlags & SPH_FACTOR_ENABLE ) && tQuery.m_eRanker!=SPH_RANK_EXPR )
tMeta.m_sWarning.SetSprintf ( "packedfactors() and bm25f() requires using an expression ranker" );
tCtx.SetupExtraData ( pRanker.get(), dSorters.GetLength()==1 ? dSorters[0] : nullptr );
const BYTE * pBlobPool = m_tBlobAttrs.GetReadPtr();
pRanker->ExtraData ( EXTRA_SET_BLOBPOOL, (void**)&pBlobPool );
const columnar::Columnar_i * pColumnar = m_pColumnar.get();
pRanker->ExtraData ( EXTRA_SET_COLUMNAR, (void**)&pColumnar );
int iMatchPoolSize = 0;
dSorters.Apply ( [&iMatchPoolSize] ( const ISphMatchSorter * p ) { iMatchPoolSize += p->GetMatchCapacity(); } );
pRanker->ExtraData ( EXTRA_SET_POOL_CAPACITY, (void**)&iMatchPoolSize );
// check for the possible integer overflow in m_dPool.Resize
int64_t iPoolSize = 0;
if ( pRanker->ExtraData ( EXTRA_GET_POOL_SIZE, (void**)&iPoolSize ) && iPoolSize>INT_MAX )
{
tMeta.m_sError.SetSprintf ( "ranking factors pool too big (%d Mb), reduce max_matches", (int)( iPoolSize/1024/1024 ) );
return false;
}
// empty index, empty response!
if ( m_bIsEmpty )
return true;
SetupRowIdBoundaries ( tQuery.m_dFilters, RowID_t(m_iDocinfo), *pRanker );
if ( CheckEarlyReject ( dTransformedFilters, tCtx.m_pFilter.get(), tQuery.m_eCollation, tMaxSorterSchema ) )
{
tMeta.m_iQueryTime += (int)( ( sphMicroTimer()-tmQueryStart )/1000 );
tMeta.m_iCpuTime += sphTaskCpuTimer ()-tmCpuQueryStart;
return true;
}
for ( auto & i : dSorters )
{
i->SetBlobPool ( m_tBlobAttrs.GetReadPtr() );
i->SetColumnar ( m_pColumnar.get() );
}
SwitchProfile ( pProfile, SPH_QSTATE_SETUP_ITER );
int iCutoff = ApplyImplicitCutoff ( tQuery, dSorters, true );
int iOldLen = tMeta.m_tIteratorStats.m_dIterators.GetLength();
for ( auto & i : dSorters )
i->AddDesc ( tMeta.m_tIteratorStats.m_dIterators );
if ( tMeta.m_tIteratorStats.m_dIterators.GetLength()!=iOldLen )
tMeta.m_tIteratorStats.m_iTotal = 1;
CSphVector<CSphFilterSettings> dFiltersAfterIterator; // holds filter settings if they were modified. filters hold pointers to those settings
std::pair<RowidIterator_i *, bool> tSpawned = SpawnIterators ( tQuery, dTransformedFilters, tCtx, tFlx, tMaxSorterSchema, tMeta, iCutoff, tArgs.m_iTotalThreads, dFiltersAfterIterator, pRanker.get() );
std::unique_ptr<RowidIterator_i> pIterator = std::unique_ptr<RowidIterator_i> ( tSpawned.first );
if ( tSpawned.second )
return false;
if ( pIterator )
{
auto pIter = pIterator.get();
pRanker->ExtraData ( EXTRA_SET_ITERATOR, (void**)&pIter );
}
//////////////////////////////////////
// find and weight matching documents
//////////////////////////////////////
bool bFinalPass = !!tCtx.m_dCalcFinal.GetLength();
int iMyTag = bFinalPass ? -1 : tArgs.m_iTag;
assert ( tQuery.m_eMode==SPH_MATCH_ALL || tQuery.m_eMode==SPH_MATCH_PHRASE || tQuery.m_eMode==SPH_MATCH_ANY || tQuery.m_eMode==SPH_MATCH_EXTENDED || tQuery.m_eMode==SPH_MATCH_EXTENDED2 || tQuery.m_eMode==SPH_MATCH_BOOLEAN );
bool bHaveRandom = false;
dSorters.Apply ( [&bHaveRandom] ( const ISphMatchSorter * p ) { bHaveRandom |= p->IsRandom(); } );
bool bUseFactors = !!( tCtx.m_uPackedFactorFlags & SPH_FACTOR_ENABLE );
bool bUseKlist = m_tDeadRowMap.HasDead();
bool bHasSortCalc = !tCtx.m_dCalcSort.IsEmpty();
bool bHasWeightFilter = !!tCtx.m_pWeightFilter;
bool bHasFilterCalc = !tCtx.m_dCalcFilter.IsEmpty();
bool bHasCutoff = iCutoff!=-1;
int iIndex = bUseKlist*64 + bHaveRandom*32 + bUseFactors*16 + bHasSortCalc*8 + bHasWeightFilter*4 + bHasFilterCalc*2 + bHasCutoff;
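// Dispatch example (illustrative): with only bUseKlist and bHasCutoff set,
// iIndex = 64+1 = 65, selecting MatchExtended<true,false,false,false,false,false,true>
// from the BOOST_PP-generated case table below.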
switch ( iIndex )
{
#define DECL_FNSCAN( _, n, params ) case n: MatchExtended<!!(n&64), !!(n&32), !!(n&16), !!(n&8), !!(n&4), !!(n&2), !!(n&1)> params; break;
BOOST_PP_REPEAT ( 128, DECL_FNSCAN, ( tCtx, tQuery, dSorters, pRanker.get(), iMyTag, tArgs.m_iIndexWeight, iCutoff ) )
#undef DECL_FNSCAN
default:
assert ( 0 && "Internal error" );
break;
}
////////////////////
// cook result sets
////////////////////
SwitchProfile ( pProfile, SPH_QSTATE_FINALIZE );
if ( pIterator )
{
pIterator->AddDesc ( tMeta.m_tIteratorStats.m_dIterators );
tMeta.m_tIteratorStats.m_iTotal = 1;
}
if ( dSorters.any_of ( [&] ( ISphMatchSorter * p ) { return !p->FinalizeJoin ( tMeta.m_sError, tMeta.m_sWarning ); } ) )
return false;
// adjust result sets
if ( bFinalPass )
{
// GotUDF means promise to UDFs that final-stage calls will be evaluated
// a) over the final, pre-limit result set
// b) in the final result set order
bool bGotUDF = false;
DocstoreSession_c tSession;
int64_t iSessionUID = tSession.GetUID();
// spawn buffered readers for the current session
// put them to a global hash
if ( m_pDocstore )
m_pDocstore->CreateReader ( iSessionUID );
DocstoreSession_c::InfoRowID_t tSessionInfo;
tSessionInfo.m_pDocstore = m_pDocstore.get();
tSessionInfo.m_iSessionId = iSessionUID;
for ( auto & i : tCtx.m_dCalcFinal )
{
assert ( i.m_pExpr );
if ( !bGotUDF )
i.m_pExpr->Command ( SPH_EXPR_GET_UDF, &bGotUDF );
if ( m_pDocstore )
i.m_pExpr->Command ( SPH_EXPR_SET_DOCSTORE_ROWID, &tSessionInfo );
}
SphFinalMatchCalc_t tFinal ( tArgs.m_iTag, tCtx );
dSorters.Apply ( [&] ( ISphMatchSorter * p ) { p->Finalize ( tFinal, bGotUDF, tArgs.m_bFinalizeSorters ); } );
}
pRanker->FinalizeCache ( tMaxSorterSchema );
tResult.m_pBlobPool = m_tBlobAttrs.GetReadPtr();
tResult.m_pDocstore = m_pDocstore ? this : nullptr;
tResult.m_pColumnar = m_pColumnar.get();
// query timer
int64_t tmWall = sphMicroTimer() - tmQueryStart;
tMeta.m_iQueryTime += (int)( tmWall/1000 );
tMeta.m_iCpuTime += sphTaskCpuTimer ()-tmCpuQueryStart;
QUERYINFO << GetName() << ": qtm " << (int)(tmWall) << ", " << tQueryStats.m_iFetchedDocs << ", " << tQueryStats.m_iFetchedHits << ", " << tQueryStats.m_iSkips << ", " << dSorters[0]->GetTotalCount();
SwitchProfile ( pProfile, eOldState );
if ( bCollectPredictionCounters )
{
tMeta.m_tStats.Add ( tQueryStats );
tMeta.m_bHasPrediction = true;
}
return true;
}
void CSphIndex_VLN::CreateReader ( int64_t iSessionId ) const
{
if ( !m_pDocstore )
return;
m_pDocstore->CreateReader(iSessionId);
}
bool CSphIndex_VLN::GetDoc ( DocstoreDoc_t & tDoc, DocID_t tDocID, const VecTraits_T<int> * pFieldIds, int64_t iSessionId, bool bPack ) const
{
if ( !m_pDocstore )
return false;
RowID_t tRowID = GetRowidByDocid ( tDocID );
if ( tRowID==INVALID_ROWID || m_tDeadRowMap.IsSet(tRowID) )
return false;
tDoc = m_pDocstore->GetDoc ( tRowID, pFieldIds, iSessionId, bPack );
return true;
}
int CSphIndex_VLN::GetFieldId ( const CSphString & sName, DocstoreDataType_e eType ) const
{
if ( !m_pDocstore )
return -1;
return m_pDocstore->GetFieldId ( sName, eType );
}
//////////////////////////////////////////////////////////////////////////
// INDEX STATUS
//////////////////////////////////////////////////////////////////////////
void CSphIndex_VLN::GetStatus ( CSphIndexStatus* pRes ) const
{
assert ( pRes );
if ( !pRes )
return;
pRes->m_iMapped = m_tAttr.GetLengthBytes ()
+m_tBlobAttrs.GetLengthBytes ()
+m_tWordlist.m_tBuf.GetLengthBytes ()
+m_tDeadRowMap.GetLengthBytes ()
+m_tSkiplists.GetLengthBytes ();
pRes->m_iMappedResident = m_tAttr.GetCoreSize ()
+m_tBlobAttrs.GetCoreSize ()
+m_tWordlist.m_tBuf.GetCoreSize ()
+m_tDeadRowMap.GetCoreSize ()
+m_tSkiplists.GetCoreSize ();
pRes->m_iDead = m_tDeadRowMap.GetNumDeads();
if ( m_pDoclistFile )
{
pRes->m_iMappedDocs = m_pDoclistFile->GetMappedsize ();
pRes->m_iMappedResidentDocs = m_pDoclistFile->GetCoresize ();
pRes->m_iMapped += pRes->m_iMappedDocs;
pRes->m_iMappedResident += pRes->m_iMappedResidentDocs;
}
if ( m_pHitlistFile )
{
pRes->m_iMappedHits = m_pHitlistFile->GetMappedsize ();
pRes->m_iMappedResidentHits = m_pHitlistFile->GetCoresize ();
pRes->m_iMapped += pRes->m_iMappedHits;
pRes->m_iMappedResident += pRes->m_iMappedResidentHits;
}
pRes->m_iRamUse = sizeof(CSphIndex_VLN) + m_dFieldLens.GetLengthBytes() + pRes->m_iMappedResident;
pRes->m_iDiskUse = 0;
CSphVector<IndexFileExt_t> dExts = sphGetExts();
for ( const auto & i : dExts )
{
if ( i.m_eExt==SPH_EXT_SPL )
continue;
CSphString sFile = GetFilename ( i.m_eExt );
struct_stat st;
if ( stat ( sFile.cstr(), &st )==0 )
pRes->m_iDiskUse += st.st_size;
}
// sphWarning ( "Chunks: %d, RAM: %d, DISK: %d", pRes->m_iNumChunks, (int) pRes->m_iRamUse, (int) pRes->m_iMapped );
}
//////////////////////////////////////////////////////////////////////////
// INDEX CHECKING
//////////////////////////////////////////////////////////////////////////
void CSphIndex_VLN::SetDebugCheck ( bool bCheckIdDups, int )
{
m_bDebugCheck = true;
m_bCheckIdDups = bCheckIdDups;
}
// no strnlen on some OSes (Mac OS)
#if !HAVE_STRNLEN
size_t strnlen ( const char * s, size_t iMaxLen )
{
if ( !s )
return 0;
size_t iRes = 0;
while ( *s++ && iRes<iMaxLen )
++iRes;
return iRes;
}
#endif
int CSphIndex_VLN::DebugCheck ( DebugCheckError_i & tReporter, FilenameBuilder_i * pFilenameBuilder )
{
auto pIndexChecker = std::make_unique<DiskIndexChecker_c> ( *this, tReporter );
pIndexChecker->Setup ( m_iDocinfo, m_iDocinfoIndex, m_iMinMaxIndex, m_bCheckIdDups );
// check if index is ready
if ( !m_bPassedAlloc )
tReporter.Fail ( "table not preread" );
if ( !pIndexChecker->OpenFiles() )
return 1;
if ( !LoadHitlessWords ( m_tSettings.m_sHitlessFiles, m_pTokenizer, m_pDict, pIndexChecker->GetHitlessWords(), m_sLastError ) )
tReporter.Fail ( "unable to load hitless words: %s", m_sLastError.cstr() );
CSphSavedFile tStat;
CSphString sError;
const CSphTokenizerSettings & tTokenizerSettings = m_pTokenizer->GetSettings ();
if ( !tTokenizerSettings.m_sSynonymsFile.IsEmpty() )
{
CSphString sSynonymsFile = tTokenizerSettings.m_sSynonymsFile;
if ( pFilenameBuilder )
sSynonymsFile = pFilenameBuilder->GetFullPath ( sSynonymsFile );
if ( !tStat.Collect ( sSynonymsFile.cstr(), &sError ) )
tReporter.Fail ( "unable to open exceptions '%s': %s", sSynonymsFile.cstr(), sError.cstr() );
}
const CSphDictSettings & tDictSettings = m_pDict->GetSettings ();
const char * pStop = tDictSettings.m_sStopwords.cstr();
while (true)
{
// find next name start
while ( pStop && *pStop && isspace(*pStop) ) pStop++;
if ( !pStop || !*pStop ) break;
const char * sNameStart = pStop;
// find next name end
while ( *pStop && !isspace(*pStop) ) pStop++;
CSphString sStopFile;
sStopFile.SetBinary ( sNameStart, int ( pStop-sNameStart ) );
if ( pFilenameBuilder )
sStopFile = pFilenameBuilder->GetFullPath ( sStopFile );
if ( !tStat.Collect ( sStopFile.cstr(), &sError ) )
tReporter.Fail ( "unable to open stopwords '%s': %s", sStopFile.cstr(), sError.cstr() );
}
if ( tDictSettings.m_dWordforms.GetLength() )
{
ARRAY_FOREACH ( i, tDictSettings.m_dWordforms )
{
CSphString sWordforms = tDictSettings.m_dWordforms[i];
if ( pFilenameBuilder )
sWordforms = pFilenameBuilder->GetFullPath ( sWordforms );
if ( !tStat.Collect ( sWordforms.cstr(), &sError ) )
tReporter.Fail ( "unable to open wordforms '%s': %s", sWordforms.cstr(), sError.cstr() );
}
}
pIndexChecker->Check();
tReporter.Done();
return (int)Min ( tReporter.GetNumFails(), 255 ); // this is the exitcode; so cap it
} // NOLINT function length
static void AddFields ( const char * sQuery, CSphSchema & tSchema )
{
CSphColumnInfo tField;
const char * sToken = sQuery;
if ( !sToken )
{
tField.m_sName = "dummy_field"; // for query with only all fields, @*
tSchema.AddField ( tField );
return;
}
const char * OPTION_RELAXED = "@@relaxed";
const auto OPTION_RELAXED_LEN = (int) strlen ( OPTION_RELAXED );
if ( strncmp ( sToken, OPTION_RELAXED, OPTION_RELAXED_LEN )==0 && !sphIsAlpha ( sToken[OPTION_RELAXED_LEN] ) )
sToken += OPTION_RELAXED_LEN;
while ( *sToken )
{
if ( *sToken!='@' )
{
sToken++;
continue;
}
sToken++;
if ( !*sToken )
break;
if ( *sToken=='!' || *sToken=='*' )
sToken++;
if ( !*sToken )
break;
bool bBlock = ( *sToken=='(' );
if ( bBlock )
sToken++;
if ( !*sToken )
break;
// handle block with field names
while ( *sToken )
{
const char * sField = sToken;
while ( *sToken && sphIsAlpha( *sToken ) )
sToken++;
int iLen = int ( sToken - sField );
if ( iLen )
{
tField.m_sName.SetBinary ( sField, iLen );
if ( !tSchema.GetField ( tField.m_sName.cstr() ) )
tSchema.AddField ( tField );
}
if ( !bBlock )
break;
if ( *sToken && *sToken==',' )
sToken++;
if ( *sToken && *sToken==')' )
break;
}
}
if ( !tSchema.GetFieldsCount() )
{
tField.m_sName = "dummy_field"; // for query with only all fields, @*
tSchema.AddField ( tField );
}
}
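// A minimal illustration (not part of the original source): for the query
// "@(title,body) hello @!subject world", AddFields() collects the fields
// "title", "body" and "subject" into the schema, while a field-free query
// such as "@* hello" yields the single placeholder field "dummy_field".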
Bson_t EmptyBson ()
{
Bson_t dEmpty;
return dEmpty;
}
Bson_t Explain ( ExplainQueryArgs_t & tArgs )
{
if ( !tArgs.m_szQuery )
return EmptyBson ();
std::unique_ptr<QueryParser_i> pQueryParser ( sphCreatePlainQueryParser() );
CSphVector<BYTE> dFiltered;
const BYTE * sModifiedQuery = (const BYTE *)tArgs.m_szQuery;
if ( tArgs.m_pFieldFilter && sModifiedQuery && tArgs.m_pFieldFilter->Apply ( sModifiedQuery, dFiltered, true ) )
sModifiedQuery = dFiltered.Begin();
CSphSchema tSchema;
const CSphSchema * pSchema = tArgs.m_pSchema;
if ( !pSchema )
{
pSchema = &tSchema;
// need to fill up schema with fields from query
AddFields ( tArgs.m_szQuery, tSchema );
}
XQQuery_t tParsed;
if ( !pQueryParser->ParseQuery ( tParsed, (const char*)sModifiedQuery, nullptr, tArgs.m_pQueryTokenizer, nullptr, pSchema, tArgs.m_pDict, *tArgs.m_pSettings, tArgs.m_pMorphFields ) )
{
TlsMsg::Err ( tParsed.m_sParseError );
return EmptyBson ();
}
sphTransformExtendedQuery ( &tParsed.m_pRoot, *tArgs.m_pSettings, false, nullptr );
bool bWordDict = tArgs.m_pDict->GetSettings().m_bWordDict;
int iExpandKeywords = ExpandKeywords ( tArgs.m_iExpandKeywords, QUERY_OPT_DEFAULT, *tArgs.m_pSettings, bWordDict );
if ( iExpandKeywords!=KWE_DISABLED )
{
sphQueryExpandKeywords ( &tParsed.m_pRoot, *tArgs.m_pSettings, iExpandKeywords, bWordDict );
tParsed.m_pRoot->Check ( true );
}
// this should be after keyword expansion
TransformAotFilter ( tParsed.m_pRoot, tArgs.m_pDict->GetWordforms(), *tArgs.m_pSettings );
// expanding prefix in word dictionary case
if ( tArgs.m_bExpandPrefix )
{
CSphQueryResultMeta tMeta;
CSphScopedPayload tPayloads;
ExpansionContext_t tExpCtx;
tExpCtx.m_pWordlist = tArgs.m_pWordlist;
tExpCtx.m_pResult = &tMeta;
tExpCtx.m_iMinPrefixLen = tArgs.m_pSettings->GetMinPrefixLen ( tArgs.m_pDict->GetSettings().m_bWordDict );
tExpCtx.m_iMinInfixLen = tArgs.m_pSettings->m_iMinInfixLen;
tExpCtx.m_iExpansionLimit = tArgs.m_iExpansionLimit;
tExpCtx.m_bHasExactForms = ( tArgs.m_pDict->HasMorphology() || tArgs.m_pSettings->m_bIndexExactWords );
tExpCtx.m_bMergeSingles = false;
tExpCtx.m_pPayloads = &tPayloads;
tExpCtx.m_pIndexData = tArgs.m_pIndexData;
tParsed.m_pRoot = sphExpandXQNode ( tParsed.m_pRoot, tExpCtx );
}
return sphExplainQuery ( tParsed.m_pRoot, *pSchema, tParsed.m_dZones );
}
Bson_t CSphIndex_VLN::ExplainQuery ( const CSphString & sQuery ) const
{
ExplainQueryArgs_t tArgs;
tArgs.m_szQuery = sQuery.cstr();
tArgs.m_pSchema = &GetMatchSchema();
tArgs.m_pDict = GetStatelessDict ( m_pDict );
SetupStarDict ( tArgs.m_pDict );
SetupExactDict ( tArgs.m_pDict );
if ( m_pFieldFilter )
tArgs.m_pFieldFilter = m_pFieldFilter->Clone();
tArgs.m_pSettings = &m_tSettings;
tArgs.m_pWordlist = &m_tWordlist;
tArgs.m_pQueryTokenizer = m_pQueryTokenizer;
tArgs.m_iExpandKeywords = m_tMutableSettings.m_iExpandKeywords;
tArgs.m_iExpansionLimit = m_iExpansionLimit;
tArgs.m_bExpandPrefix = ( m_pDict->GetSettings().m_bWordDict && IsStarDict ( m_pDict->GetSettings().m_bWordDict ) );
tArgs.m_pMorphFields = &m_tMorphFields;
return Explain ( tArgs );
}
bool CSphIndex_VLN::AlterSI ( CSphString & sError )
{
if ( !IsSecondaryLibLoaded() )
{
sError = "secondary index library not loaded";
return false;
}
MergeCb_c tMonitor;
StrVec_t dCurFiles, dNewFiles;
if ( !SiRecreate ( tMonitor, *this, m_iDocinfo, dCurFiles, dNewFiles, sError ) )
return false;
ARRAY_FOREACH ( i, dCurFiles )
{
StrVec_t dFilesFrom(1);
StrVec_t dFilesTo(1);
CSphString sFileOld;
bool bCurExists = sphFileExists ( dCurFiles[i].cstr() );
if ( bCurExists )
{
sFileOld.SetSprintf ( "%s.old", dCurFiles[i].cstr() );
dFilesFrom[0] = dCurFiles[i];
dFilesTo[0] = sFileOld;
if ( !RenameWithRollback ( dFilesFrom, dFilesTo, sError ) )
return false;
if ( !m_tSI.Drop ( dCurFiles[i], sError ) )
return false;
}
dFilesFrom[0] = dNewFiles[i];
dFilesTo[0] = dCurFiles[i];
if ( !RenameWithRollback ( dFilesFrom, dFilesTo, sError ) )
return false;
if ( !m_tSI.Load ( dCurFiles[i].cstr(), sError ) )
return false;
if ( bCurExists )
::unlink ( sFileOld.cstr() );
}
return true;
}
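// Swap sequence sketch (illustrative file name): for an existing SI file
// "idx.spidx" the loop above renames it to "idx.spidx.old", drops it from m_tSI,
// moves the freshly rebuilt file into its place, loads it, and only then
// unlinks the ".old" backup -- so an error at any step leaves a recoverable state.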
//////////////////////////////////////////////////////////////////////////
// KEYWORDS STORING DICTIONARY
//////////////////////////////////////////////////////////////////////////
class CRtDictKeywords final : public ISphRtDictWraper
{
private:
DictRefPtr_c m_pBase;
SmallStringHash_T<int, 8192> m_hKeywords;
CSphVector<BYTE> m_dPackedKeywords {0};
CSphString m_sWarning;
int m_iKeywordsOverrun = 0;
CSphString m_sWord; // For allocation reuse.
const bool m_bStoreID = false;
protected:
~CRtDictKeywords () final = default; // Is here since protected. fixme! remove
public:
explicit CRtDictKeywords ( DictRefPtr_c pBase, bool bStoreID )
: m_pBase ( std::move ( pBase ) )
, m_bStoreID ( bStoreID )
{
m_dPackedKeywords.Add ( 0 ); // avoid zero offset at all costs
}
SphWordID_t GetWordID ( BYTE * pWord ) final
{
SphWordID_t tWordID = m_pBase->GetWordID ( pWord );
if ( tWordID )
return AddKeyword ( pWord, tWordID );
return 0;
}
SphWordID_t GetWordIDWithMarkers ( BYTE * pWord ) final
{
SphWordID_t tWordID = m_pBase->GetWordIDWithMarkers ( pWord );
if ( tWordID )
return AddKeyword ( pWord, tWordID );
return 0;
}
SphWordID_t GetWordIDNonStemmed ( BYTE * pWord ) final
{
SphWordID_t tWordID = m_pBase->GetWordIDNonStemmed ( pWord );
if ( tWordID )
return AddKeyword ( pWord, tWordID );
return 0;
}
SphWordID_t GetWordID ( const BYTE * pWord, int iLen, bool bFilterStops ) final
{
SphWordID_t tWordID = m_pBase->GetWordID ( pWord, iLen, bFilterStops );
if ( tWordID )
return AddKeyword ( pWord, tWordID );
return 0;
}
const BYTE * GetPackedKeywords () final { return m_dPackedKeywords.Begin(); }
int GetPackedLen () final { return m_dPackedKeywords.GetLength(); }
void ResetKeywords() final
{
m_dPackedKeywords.Resize ( 0 );
m_dPackedKeywords.Add ( 0 ); // avoid zero offset at all costs
m_hKeywords.Reset();
}
void LoadStopwords ( const char * sFiles, FilenameBuilder_i * pFilenameBuilder, const TokenizerRefPtr_c& pTokenizer, bool bStripFile ) final { m_pBase->LoadStopwords ( sFiles, pFilenameBuilder, pTokenizer, bStripFile ); }
void LoadStopwords ( const CSphVector<SphWordID_t> & dStopwords ) final { m_pBase->LoadStopwords ( dStopwords ); }
void WriteStopwords ( Writer_i & tWriter ) const final { m_pBase->WriteStopwords ( tWriter ); }
void WriteStopwords ( JsonEscapedBuilder & tOut ) const final { m_pBase->WriteStopwords ( tOut ); }
bool LoadWordforms ( const StrVec_t & dFiles, const CSphEmbeddedFiles * pEmbedded, const TokenizerRefPtr_c& pTokenizer, const char * szIndex ) final
{ return m_pBase->LoadWordforms ( dFiles, pEmbedded, pTokenizer, szIndex ); }
void WriteWordforms ( Writer_i & tWriter ) const final { m_pBase->WriteWordforms ( tWriter ); }
void WriteWordforms ( JsonEscapedBuilder & tOut ) const final { m_pBase->WriteWordforms ( tOut ); }
int SetMorphology ( const char * szMorph, CSphString & sMessage ) final { return m_pBase->SetMorphology ( szMorph, sMessage ); }
void Setup ( const CSphDictSettings & tSettings ) final { m_pBase->Setup ( tSettings ); }
const CSphDictSettings & GetSettings () const final { return m_pBase->GetSettings(); }
const CSphVector <CSphSavedFile> & GetStopwordsFileInfos () const final { return m_pBase->GetStopwordsFileInfos(); }
const CSphVector <CSphSavedFile> & GetWordformsFileInfos () const final { return m_pBase->GetWordformsFileInfos(); }
const CSphMultiformContainer * GetMultiWordforms () const final { return m_pBase->GetMultiWordforms(); }
bool IsStopWord ( const BYTE * pWord ) const final { return m_pBase->IsStopWord ( pWord ); }
const char * GetLastWarning() const final { return m_iKeywordsOverrun ? m_sWarning.cstr() : nullptr; }
void ResetWarning () final { m_iKeywordsOverrun = 0; }
uint64_t GetSettingsFNV () const final { return m_pBase->GetSettingsFNV(); }
private:
SphWordID_t AddKeyword ( const BYTE * pWord, SphWordID_t tWordID )
{
auto iLen = (int) strlen ( ( const char * ) pWord );
// stemmer might squeeze out the word
if ( !iLen )
return 0;
// fix of very long word (zones)
if ( iLen>( SPH_MAX_WORD_LEN * 3 ) )
{
int iClippedLen = SPH_MAX_WORD_LEN * 3;
m_sWord.SetBinary ( ( const char * ) pWord, iClippedLen );
if ( m_iKeywordsOverrun )
{
m_sWarning.SetSprintf ( "word overrun buffer, clipped!!! clipped='%s', length=%d(%d)", m_sWord.cstr ()
, iClippedLen, iLen );
} else
{
m_sWarning.SetSprintf ( ", clipped='%s', length=%d(%d)", m_sWord.cstr (), iClippedLen, iLen );
}
iLen = iClippedLen;
m_iKeywordsOverrun++;
} else
{
m_sWord.SetBinary ( ( const char * ) pWord, iLen );
}
int * pOff = m_hKeywords ( m_sWord );
if ( pOff )
{
return *pOff;
}
int iOff = m_dPackedKeywords.GetLength ();
int iPackedLen = iOff + iLen + 1;
if ( m_bStoreID )
iPackedLen += sizeof ( tWordID );
m_dPackedKeywords.Resize ( iPackedLen );
m_dPackedKeywords[iOff] = ( BYTE ) ( iLen & 0xFF );
memcpy ( m_dPackedKeywords.Begin () + iOff + 1, pWord, iLen );
if ( m_bStoreID )
memcpy ( m_dPackedKeywords.Begin () + iOff + 1 + iLen, &tWordID, sizeof(tWordID) );
m_hKeywords.Add ( iOff, m_sWord );
return iOff;
}
};
ISphRtDictWraperRefPtr_c sphCreateRtKeywordsDictionaryWrapper ( DictRefPtr_c pBase, bool bStoreID )
{
return ISphRtDictWraperRefPtr_c { new CRtDictKeywords ( std::move ( pBase ), bStoreID ) };
}
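// Packed-keyword layout sketch (illustrative): the buffer starts with a single
// reserved 0 byte, so the first AddKeyword("cat", id) returns offset 1 and the
// buffer becomes { 0x00, 0x03, 'c', 'a', 't' [, raw wordid bytes if bStoreID] };
// adding "cat" again returns the same offset via the m_hKeywords lookup.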
//////////////////////////////////////////////////////////////////////////
// DICTIONARY FACTORIES
//////////////////////////////////////////////////////////////////////////
const CSphSourceStats& GetStubStats()
{
static CSphSourceStats tDummy;
return tDummy;
}
//////////////////////////////////////////////////////////////////////////
#if WITH_RE2
class CSphFieldRegExps : public ISphFieldFilter
{
public:
CSphFieldRegExps () = default;
CSphFieldRegExps ( const StrVec_t& m_dRegexps, CSphString & sError );
~CSphFieldRegExps() override;
int Apply ( const BYTE * sField, int iLength, CSphVector<BYTE> & dStorage, bool ) final;
void GetSettings ( CSphFieldFilterSettings & tSettings ) const final;
std::unique_ptr<ISphFieldFilter> Clone ( const FieldFilterOptions_t * pOptions ) const final;
void AddRegExp ( const char * sRegExp, StringBuilder_c & sErrors );
private:
struct RegExp_t
{
CSphString m_sFrom;
CSphString m_sTo;
RE2 * m_pRE2;
};
CSphVector<RegExp_t> m_dRegexps;
bool m_bCloned = true;
};
CSphFieldRegExps::CSphFieldRegExps ( const StrVec_t &dRegexps, CSphString &sError )
: m_bCloned ( false )
{
StringBuilder_c sErrors (", ");
for ( const auto &sRegexp : dRegexps )
AddRegExp ( sRegexp.cstr (), sErrors );
sErrors.MoveTo ( sError );
}
CSphFieldRegExps::~CSphFieldRegExps ()
{
if ( !m_bCloned )
{
for ( auto & dRegexp : m_dRegexps )
SafeDelete ( dRegexp.m_pRE2 );
}
}
int CSphFieldRegExps::Apply ( const BYTE * sField, int iLength, CSphVector<BYTE> & dStorage, bool )
{
dStorage.Resize ( 0 );
if ( !sField || !*sField )
return 0;
bool bReplaced = false;
std::string sRe2 = ( iLength ? std::string ( (const char *) sField, iLength ) : (const char *) sField );
ARRAY_FOREACH ( i, m_dRegexps )
{
assert ( m_dRegexps[i].m_pRE2 );
bReplaced |= ( RE2::GlobalReplace ( &sRe2, *m_dRegexps[i].m_pRE2, m_dRegexps[i].m_sTo.cstr() )>0 );
}
if ( !bReplaced )
return 0;
auto iDstLen = (int) sRe2.length();
dStorage.Resize ( iDstLen+4 ); // string SAFETY_GAP
strncpy ( (char *)dStorage.Begin(), sRe2.c_str(), dStorage.GetLength() );
return iDstLen;
}
void CSphFieldRegExps::GetSettings ( CSphFieldFilterSettings & tSettings ) const
{
tSettings.m_dRegexps.Resize ( m_dRegexps.GetLength() );
ARRAY_FOREACH ( i, m_dRegexps )
tSettings.m_dRegexps[i].SetSprintf ( "%s => %s", m_dRegexps[i].m_sFrom.cstr(), m_dRegexps[i].m_sTo.cstr() );
}
void CSphFieldRegExps::AddRegExp ( const char * sRegExp, StringBuilder_c & sErrors )
{
if ( m_bCloned )
return;
const char sSplitter [] = "=>";
const char * sSplit = strstr ( sRegExp, sSplitter );
if ( !sSplit )
{
sErrors << "mapping token (=>) not found";
return;
} else if ( strstr ( sSplit + strlen ( sSplitter ), sSplitter ) )
{
sErrors << "mapping token (=>) found more than once";
return;
}
RegExp_t tRegExp;
tRegExp.m_sFrom.SetBinary ( sRegExp, int ( sSplit-sRegExp ) );
tRegExp.m_sTo = sSplit + strlen ( sSplitter );
tRegExp.m_sFrom.Trim();
tRegExp.m_sTo.Trim();
RE2::Options tOptions;
tOptions.set_encoding ( RE2::Options::Encoding::EncodingUTF8 );
auto pRE2 = std::make_unique<RE2> ( tRegExp.m_sFrom.cstr(), tOptions );
std::string sRE2Error;
if ( !pRE2->CheckRewriteString ( tRegExp.m_sTo.cstr(), &sRE2Error ) )
{
sErrors.Sprintf( "\"%s => %s\" is not a valid mapping: %s", tRegExp.m_sFrom.cstr(), tRegExp.m_sTo.cstr(), sRE2Error.c_str() );
return;
}
tRegExp.m_pRE2 = pRE2.release();
m_dRegexps.Add ( std::move ( tRegExp ) );
}
std::unique_ptr<ISphFieldFilter> CSphFieldRegExps::Clone ( const FieldFilterOptions_t * pOptions ) const
{
auto pCloned = std::make_unique<CSphFieldRegExps>();
pCloned->m_dRegexps = m_dRegexps;
return pCloned;
}
#endif
#if WITH_RE2
std::unique_ptr<ISphFieldFilter> sphCreateRegexpFilter ( const CSphFieldFilterSettings & tFilterSettings, CSphString & sError )
{
return std::make_unique<CSphFieldRegExps> ( tFilterSettings.m_dRegexps, sError );
}
#else
std::unique_ptr<ISphFieldFilter> sphCreateRegexpFilter ( const CSphFieldFilterSettings &, CSphString & )
{
return nullptr;
}
#endif
/////////////////////////////////////////////////////////////////////////////
// GENERIC SOURCE
/////////////////////////////////////////////////////////////////////////////
bool ParseMorphFields ( const CSphString & sMorphology, const CSphString & sMorphFields, const CSphVector<CSphColumnInfo> & dFields, CSphBitvec & tMorphFields, CSphString & sError )
{
if ( sMorphology.IsEmpty() || sMorphFields.IsEmpty() )
return true;
CSphString sFields = sMorphFields;
sFields.ToLower();
sFields.Trim();
if ( !sFields.Length() )
return true;
OpenHashTable_T<int64_t, int> hFields;
ARRAY_FOREACH ( i, dFields )
hFields.Add ( sphFNV64 ( dFields[i].m_sName.cstr() ), i );
StringBuilder_c sMissed;
int iFieldsGot = 0;
tMorphFields.Init ( dFields.GetLength() );
tMorphFields.Set(); // all fields have morphology by default
for ( const char * sCur=sFields.cstr(); ; )
{
while ( *sCur && ( sphIsSpace ( *sCur ) || *sCur==',' ) )
++sCur;
if ( !*sCur )
break;
const char * sStart = sCur;
while ( *sCur && !sphIsSpace ( *sCur ) && *sCur!=',' )
++sCur;
if ( sCur==sStart )
break;
int * pField = hFields.Find ( sphFNV64 ( sStart, int ( sCur - sStart ) ) );
if ( !pField )
{
const char * sSep = sMissed.GetLength() ? ", " : "";
sMissed.Appendf ( "%s%.*s", sSep, (int)(sCur - sStart), sStart );
break;
}
iFieldsGot++;
tMorphFields.BitClear ( *pField );
}
// no fields set - need to reset bitvec to skip checks on indexing
if ( !iFieldsGot )
tMorphFields.Init ( 0 );
if ( sMissed.GetLength() )
sError.SetSprintf ( "morphology_skip_fields contains out of schema fields: %s", sMissed.cstr() );
return ( !sMissed.GetLength() );
}
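// Example (hypothetical config): with schema fields (title, body, tags) and
// morphology_skip_fields = "body, tags", only the "title" bit stays set in
// tMorphFields, so morphology is applied to "title" alone; naming a field
// that is not in the schema reports an error and returns false.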
bool SchemaConfigureCheckAttribute ( const CSphSchema & tSchema, const CSphColumnInfo & tCol, CSphString & sError )
{
if ( tCol.m_sName.IsEmpty() )
{
sError.SetSprintf ( "column number %d has no name", tCol.m_iIndex );
return false;
}
if ( tSchema.GetAttr ( tCol.m_sName.cstr() ) )
{
sError.SetSprintf ( "can not add multiple attributes with same name '%s'", tCol.m_sName.cstr () );
return false;
}
if ( CSphSchema::IsReserved ( tCol.m_sName.cstr() ) )
{
sError.SetSprintf ( "%s is not a valid attribute name", tCol.m_sName.cstr() );
return false;
}
return true;
}
/////////////////////////////////////////////////////////////////////////////
void sphSetJsonOptions ( bool bStrict, bool bAutoconvNumbers, bool bKeynamesToLowercase )
{
g_bJsonStrict = bStrict;
g_bJsonAutoconvNumbers = bAutoconvNumbers;
g_bJsonKeynamesToLowercase = bKeynamesToLowercase;
}
void SetPseudoSharding ( bool bSet )
{
g_bPseudoSharding = bSet;
}
bool GetPseudoSharding()
{
return g_bPseudoSharding;
}
void SetPseudoShardingThresh ( int iThresh )
{
g_iPseudoShardingThresh = iThresh;
}
void SetMergeSettings ( const BuildBufferSettings_t & tSettings )
{
g_tMergeSettings = tSettings;
}
//////////////////////////////////////////////////////////////////////////
int sphDictCmp ( const char * pStr1, int iLen1, const char * pStr2, int iLen2 )
{
assert ( pStr1 && pStr2 );
assert ( iLen1 && iLen2 );
const int iCmpLen = Min ( iLen1, iLen2 );
return memcmp ( pStr1, pStr2, iCmpLen );
}
int sphDictCmpStrictly ( const char * pStr1, int iLen1, const char * pStr2, int iLen2 )
{
assert ( pStr1 && pStr2 );
assert ( iLen1 && iLen2 );
const int iCmpLen = Min ( iLen1, iLen2 );
const int iCmpRes = memcmp ( pStr1, pStr2, iCmpLen );
return iCmpRes==0 ? iLen1-iLen2 : iCmpRes;
}
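// Usage sketch (illustrative values): sphDictCmp ( "abc", 3, "abcd", 4 )
// returns 0 because only the common prefix is compared, while
// sphDictCmpStrictly ( "abc", 3, "abcd", 4 ) returns -1 (iLen1-iLen2),
// making the shorter string sort first on an exact tie of the shared bytes.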
ISphWordlist::Args_t::Args_t ( bool bPayload, ExpansionContext_t & tCtx )
: ExpansionTrait_t ( tCtx )
, m_bPayload ( bPayload )
, m_tExpansionStats ( tCtx.m_tExpansionStats )
{
m_sBuf.Reserve ( 2048 * SPH_MAX_WORD_LEN * 3 );
m_dExpanded.Reserve ( 2048 );
m_pPayload = nullptr;
m_iTotalDocs = 0;
m_iTotalHits = 0;
}
void ISphWordlist::Args_t::AddExpanded ( const BYTE * sName, int iLen, int iDocs, int iHits )
{
SphExpanded_t & tExpanded = m_dExpanded.Add();
tExpanded.m_iDocs = iDocs;
tExpanded.m_iHits = iHits;
int iOff = m_sBuf.GetLength();
tExpanded.m_iNameOff = iOff;
m_sBuf.Resize ( iOff + iLen + 1 );
memcpy ( m_sBuf.Begin()+iOff, sName, iLen );
m_sBuf[iOff+iLen] = '\0';
}
const char * ISphWordlist::Args_t::GetWordExpanded ( int iIndex ) const
{
assert ( m_dExpanded[iIndex].m_iNameOff<m_sBuf.GetLength() );
return (const char *)m_sBuf.Begin() + m_dExpanded[iIndex].m_iNameOff;
}
void ExpansionContext_t::AggregateStats ()
{
if ( m_pResult )
m_pResult->AddStat ( m_tExpansionStats );
}
static bool operator < ( const InfixBlock_t & a, const char * b )
{
return strcmp ( a.m_sInfix, b )<0;
}
static bool operator == ( const InfixBlock_t & a, const char * b )
{
return strcmp ( a.m_sInfix, b )==0;
}
static bool operator < ( const char * a, const InfixBlock_t & b )
{
return strcmp ( a, b.m_sInfix )<0;
}
bool sphLookupInfixCheckpoints ( const char * sInfix, int iBytes, const BYTE * pInfixes, const CSphVector<InfixBlock_t> & dInfixBlocks, int iInfixCodepointBytes, CSphVector<DWORD> & dCheckpoints )
{
assert ( pInfixes );
char dInfixBuf[3*SPH_MAX_WORD_LEN+4];
memcpy ( dInfixBuf, sInfix, iBytes );
dInfixBuf[iBytes] = '\0';
// lookup block
int iBlock = FindSpan ( dInfixBlocks, dInfixBuf );
if ( iBlock<0 )
return false;
const BYTE * pBlock = pInfixes + dInfixBlocks[iBlock].m_iOffset;
// decode block and check for exact infix match
// block entry is { byte edit_code, byte[] key_append, zint data_len, zint data_deltas[] }
// zero edit_code marks block end
BYTE sKey[32];
while (true)
{
// unpack next key
int iCode = *pBlock++;
if ( !iCode )
break;
BYTE * pOut = sKey;
if ( iInfixCodepointBytes==1 )
{
pOut = sKey + ( iCode>>4 );
iCode &= 15;
while ( iCode-- )
*pOut++ = *pBlock++;
} else
{
int iKeep = ( iCode>>4 );
while ( iKeep-- )
pOut += sphUtf8CharBytes ( *pOut ); ///< the kept prefix bytes of sKey were written by previous iterations, so *pOut is initialized here
assert ( pOut-sKey<=(int)sizeof(sKey) );
iCode &= 15;
while ( iCode-- )
{
int i = sphUtf8CharBytes ( *pBlock );
while ( i-- )
*pOut++ = *pBlock++;
}
assert ( pOut-sKey<=(int)sizeof(sKey) );
}
assert ( pOut-sKey<(int)sizeof(sKey) );
#ifndef NDEBUG
*pOut = '\0'; // handy for debugging, but not used for real matching
#endif
if ( pOut==sKey+iBytes && memcmp ( sKey, dInfixBuf, iBytes )==0 )
{
// found you! decompress the data
int iLast = 0;
int iPackedLen = UnzipIntBE ( pBlock );
const BYTE * pMax = pBlock + iPackedLen;
while ( pBlock<pMax )
{
iLast += UnzipIntBE ( pBlock );
dCheckpoints.Add ( (DWORD)iLast );
}
return true;
}
int iSkip = UnzipIntBE ( pBlock );
pBlock += iSkip;
}
return false;
}
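// Decoding example (assumed 1-byte codepoints, not part of the original source):
// if the previous key was "car" and the next block entry has edit_code 0x22 with
// appended bytes "ts", the high nibble keeps 2 leading bytes ("ca") and the low
// nibble appends 2 new bytes, producing the key "cats"; edit_code 0x00 ends the block.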
static int BuildUtf8Offsets ( const char * sWord, int iLen, int * pOff, int DEBUGARG ( iBufSize ) )
{
const BYTE * s = (const BYTE *)sWord;
const BYTE * sEnd = s + iLen;
int * pStartOff = pOff;
*pOff = 0;
pOff++;
while ( s<sEnd )
{
sphUTF8Decode ( s );
*pOff = int ( s-(const BYTE *)sWord );
pOff++;
}
assert ( pOff-pStartOff<iBufSize );
return int ( pOff - pStartOff - 1 );
}
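// Worked example (illustrative): for the UTF-8 string "a\xC3\xA9b" ("aeb" with
// an accented e, 4 bytes, 3 codepoints) the function fills pOff with {0,1,3,4} --
// the byte offset where each codepoint starts plus the total length -- and returns 3.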
static void sphBuildNGrams ( const char * sWord, int iLen, int iGramLen, CSphVector<char> & dNGrams )
{
int dOff[SPH_MAX_WORD_LEN+1];
int iCodepoints = BuildUtf8Offsets ( sWord, iLen, dOff, sizeof ( dOff ) );
if ( iCodepoints<iGramLen )
return;
dNGrams.Reserve ( iLen*3 );
for ( int iChar=0; iChar<=iCodepoints-iGramLen; iChar++ )
{
int iStart = dOff[iChar];
int iEnd = dOff[iChar+iGramLen];
int iSize = iEnd - iStart;
char * sDst = dNGrams.AddN ( iSize + 1 );
memcpy ( sDst, sWord+iStart, iSize );
sDst[iSize] = '\0';
}
// n-grams are separated by a '\0' delimiter,
// so the buffer as a whole stays null-terminated
dNGrams.Last() = '\0';
}
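// Worked example (illustrative): sphBuildNGrams ( "test", 4, 3, dNGrams ) packs
// the two trigrams as "tes\0est\0" into dNGrams; a word shorter than iGramLen
// produces no output at all.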
template <typename T>
int sphLevenshtein ( const T * sWord1, int iLen1, const T * sWord2, int iLen2, CSphVector<int> & dTmp )
{
if ( !iLen1 )
return iLen2;
if ( !iLen2 )
return iLen1;
// FIXME!!! remove extra length after utf8->codepoints conversion
dTmp.Resize ( Max ( iLen1, iLen2 )+1 );
for ( int i=0; i<=iLen2; i++ )
dTmp[i] = i;
for ( int i=0; i<iLen1; i++ )
{
dTmp[0] = i+1;
int iWord1 = sWord1[i];
int iDist = i;
for ( int j=0; j<iLen2; j++ )
{
int iDistNext = dTmp[j+1];
dTmp[j+1] = ( iWord1==sWord2[j] ? iDist : ( 1 + Min ( Min ( iDist, iDistNext ), dTmp[j] ) ) );
iDist = iDistNext;
}
}
return dTmp[iLen2];
}
int sphLevenshtein ( const char * sWord1, int iLen1, const char * sWord2, int iLen2, CSphVector<int> & dTmp )
{
return sphLevenshtein<char> ( sWord1, iLen1, sWord2, iLen2, dTmp );
}
int sphLevenshtein ( const int * sWord1, int iLen1, const int * sWord2, int iLen2, CSphVector<int> & dTmp )
{
return sphLevenshtein<int> ( sWord1, iLen1, sWord2, iLen2, dTmp );
}
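// Usage sketch (classic textbook values): with a scratch vector dTmp,
// sphLevenshtein ( "kitten", 6, "sitting", 7, dTmp ) evaluates to 3 --
// the well-known edit distance between "kitten" and "sitting".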
// sort by distance(uLen) desc, checkpoint index(uOff) asc
struct CmpHistogram_fn
{
inline bool IsLess ( const Slice_t & a, const Slice_t & b ) const
{
return ( a.m_uLen>b.m_uLen || ( a.m_uLen==b.m_uLen && a.m_uOff<b.m_uOff ) );
}
};
// convert utf8 to unicode string
static int DecodeUtf8 ( const BYTE * sWord, int * pBuf )
{
if ( !sWord )
return 0;
int * pCur = pBuf;
while ( *sWord )
{
*pCur = sphUTF8Decode ( sWord );
pCur++;
}
return int ( pCur - pBuf );
}
bool SuggestResult_t::SetWord ( const char * sWord, const TokenizerRefPtr_c & pTok, bool bUseLastWord, bool bSetSentence )
{
assert ( pTok->IsQueryTok() );
TokenizerRefPtr_c pTokenizer = pTok->Clone ( SPH_CLONE );
pTokenizer->SetBuffer ( (BYTE *)const_cast<char*>(sWord), (int) strlen ( sWord ) );
int iWord = 0;
const char * pPrevToken = nullptr;
const char * pLastToken = nullptr;
for ( const BYTE* pToken = pTokenizer->GetToken(); pToken; pToken = pTokenizer->GetToken() )
{
if ( bUseLastWord || iWord==0 )
m_sWord = (const char *)pToken;
if ( bSetSentence )
{
pPrevToken = pLastToken;
pLastToken = pTokenizer->GetTokenEnd();
}
iWord++;
if ( !bUseLastWord && !bSetSentence )
break;
if ( pTokenizer->TokenIsBlended() )
pTokenizer->SkipBlended();
}
if ( bSetSentence && pPrevToken )
{
int iSentenceLen = pPrevToken - sWord;
m_sSentence.SetBinary ( sWord, iSentenceLen );
}
m_iLen = m_sWord.Length();
m_iCodepoints = DecodeUtf8 ( (const BYTE *)m_sWord.cstr(), m_dCodepoints );
m_bUtf8 = ( m_iCodepoints!=m_iLen );
bool bValidWord = ( m_iCodepoints>=3 );
if ( bValidWord )
{
// generate bigrams for short words, since for a 5-char word every trigram could contain the same wrong symbol
if ( m_iCodepoints<6 )
m_iNGramLen = 2;
sphBuildNGrams ( m_sWord.cstr(), m_iLen, m_iNGramLen, m_dTrigrams );
}
return bValidWord;
}
void SuggestResult_t::Flattern ( int iLimit )
{
int iCount = Min ( m_dMatched.GetLength(), iLimit );
m_dMatched.Resize ( iCount );
}
struct SliceInt_t
{
int m_iOff;
int m_iEnd;
};
static void SuggestGetChekpoints ( const ISphWordlistSuggest * pWordlist, int iInfixCodepointBytes, const CSphVector<char> & dTrigrams, CSphVector<Slice_t> & dCheckpoints, SuggestResult_t & tStats )
{
CSphVector<DWORD> dWordCp; // FIXME!!! add mask that trigram matched
// v1 - current index, v2 - end index
CSphVector<SliceInt_t> dMergeIters;
int iReserveLen = 0;
int iLastLen = 0;
const char * sTrigram = dTrigrams.Begin();
const char * sTrigramEnd = sTrigram + dTrigrams.GetLength();
while (true)
{
auto iTrigramLen = (int) strlen ( sTrigram );
int iInfixLen = sphGetInfixLength ( sTrigram, iTrigramLen, iInfixCodepointBytes );
// count how many checkpoints we will get
iReserveLen = Max ( iReserveLen, dWordCp.GetLength () - iLastLen );
iLastLen = dWordCp.GetLength();
dMergeIters.Add().m_iOff = dWordCp.GetLength();
pWordlist->SuffixGetChekpoints ( tStats, sTrigram, iInfixLen, dWordCp );
sTrigram += iTrigramLen + 1;
if ( sTrigram>=sTrigramEnd )
break;
if ( sphInterrupted() )
return;
}
if ( !dWordCp.GetLength() )
return;
for ( int i=0; i<dMergeIters.GetLength()-1; i++ )
{
dMergeIters[i].m_iEnd = dMergeIters[i+1].m_iOff;
}
dMergeIters.Last().m_iEnd = dWordCp.GetLength();
// v1 - checkpoint index, v2 - checkpoint count
dCheckpoints.Reserve ( iReserveLen );
dCheckpoints.Resize ( 0 );
// merge sorting of already ordered checkpoints
while (true)
{
DWORD iMinCP = UINT_MAX;
DWORD iMinIndex = UINT_MAX;
ARRAY_FOREACH ( i, dMergeIters )
{
const SliceInt_t & tElem = dMergeIters[i];
if ( tElem.m_iOff<tElem.m_iEnd && dWordCp[tElem.m_iOff]<iMinCP )
{
iMinIndex = i;
iMinCP = dWordCp[tElem.m_iOff];
}
}
if ( iMinIndex==UINT_MAX )
break;
if ( dCheckpoints.GetLength()==0 || iMinCP!=dCheckpoints.Last().m_uOff )
{
dCheckpoints.Add().m_uOff = iMinCP;
dCheckpoints.Last().m_uLen = 1;
} else
{
dCheckpoints.Last().m_uLen++;
}
assert ( iMinIndex!=UINT_MAX && iMinCP!=UINT_MAX );
assert ( dMergeIters[iMinIndex].m_iOff<dMergeIters[iMinIndex].m_iEnd );
dMergeIters[iMinIndex].m_iOff++;
}
dCheckpoints.Sort ( CmpHistogram_fn() );
}
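// Merge example (illustrative): per-trigram checkpoint lists {3,7} and {3,9}
// merge into the histogram {off=3,len=2}, {off=7,len=1}, {off=9,len=1};
// CmpHistogram_fn then orders it by match count desc, checkpoint index asc,
// so checkpoint 3 (matched by both trigrams) is probed first.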
struct CmpSuggestOrder_fn
{
bool IsLess ( const SuggestWord_t & a, const SuggestWord_t & b ) const
{
if ( a.m_iDistance==b.m_iDistance )
return a.m_iDocs>b.m_iDocs;
return a.m_iDistance<b.m_iDistance;
}
};
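// Ordering example (illustrative): {dist=1, docs=10} sorts before
// {dist=1, docs=3}, which in turn sorts before {dist=2, docs=100} --
// closer matches first, more frequent words first on equal distance.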
void SuggestMergeDocs ( CSphVector<SuggestWord_t> & dMatched )
{
if ( !dMatched.GetLength() )
return;
dMatched.Sort ( bind ( &SuggestWord_t::m_iNameHash ) );
int iSrc = 1;
int iDst = 1;
while ( iSrc<dMatched.GetLength() )
{
if ( dMatched[iDst-1].m_iNameHash==dMatched[iSrc].m_iNameHash )
{
dMatched[iDst-1].m_iDocs += dMatched[iSrc].m_iDocs;
iSrc++;
} else
{
dMatched[iDst++] = dMatched[iSrc++];
}
}
dMatched.Resize ( iDst );
}
template <bool SINGLE_BYTE_CHAR>
void SuggestMatchWords ( const ISphWordlistSuggest * pWordlist, const CSphVector<Slice_t> & dCheckpoints, const SuggestArgs_t & tArgs, SuggestResult_t & tRes )
{
// walk those checkpoints, check all their words
const int iMinWordLen = ( tArgs.m_iDeltaLen>0 ? Max ( 0, tRes.m_iCodepoints - tArgs.m_iDeltaLen ) : -1 );
const int iMaxWordLen = ( tArgs.m_iDeltaLen>0 ? tRes.m_iCodepoints + tArgs.m_iDeltaLen : INT_MAX );
OpenHashTable_T<int64_t, int> dHashTrigrams;
const char * sBuf = tRes.m_dTrigrams.Begin ();
const char * sEnd = sBuf + tRes.m_dTrigrams.GetLength();
while ( sBuf<sEnd )
{
dHashTrigrams.Add ( sphCRC32 ( sBuf ), 1 );
while ( *sBuf ) sBuf++;
sBuf++;
}
int dCharOffset[SPH_MAX_WORD_LEN+1];
int dDictWordCodepoints[SPH_MAX_WORD_LEN];
const int iQLen = Max ( tArgs.m_iQueueLen, tArgs.m_iLimit );
const int iRejectThr = tArgs.m_iRejectThr;
int iQueueRejected = 0;
int iLastBad = 0;
bool bSorted = true;
const bool bMergeWords = tRes.m_bMergeWords;
const bool bHasExactDict = tRes.m_bHasExactDict;
const int iMaxEdits = tArgs.m_iMaxEdits;
const bool bNonCharAllowed = tArgs.m_bNonCharAllowed;
const int iNGramLen = tRes.m_iNGramLen;
tRes.m_dMatched.Reserve ( iQLen * 2 );
CmpSuggestOrder_fn fnCmp;
CSphVector<int> dLevenshteinTmp;
ARRAY_FOREACH ( i, dCheckpoints )
{
DWORD iCP = dCheckpoints[i].m_uOff;
pWordlist->SetCheckpoint ( tRes, iCP );
ISphWordlistSuggest::DictWord_t tWord;
while ( pWordlist->ReadNextWord ( tRes, tWord ) )
{
const char * sDictWord = tWord.m_sWord;
int iDictWordLen = tWord.m_iLen;
int iDictCodepoints = iDictWordLen;
// for stemmers / lemmatizers, suggest should match only the original words
if ( bHasExactDict && sDictWord[0]!=MAGIC_WORD_HEAD_NONSTEMMED )
continue;
if ( bHasExactDict )
{
// skip head MAGIC_WORD_HEAD_NONSTEMMED char
sDictWord++;
iDictWordLen--;
iDictCodepoints--;
}
if_const ( SINGLE_BYTE_CHAR )
{
if ( iDictWordLen<=iMinWordLen || iDictWordLen>=iMaxWordLen )
continue;
}
int iChars = 0;
const BYTE * s = (const BYTE *)sDictWord;
const BYTE * sDictWordEnd = s + iDictWordLen;
bool bGotNonChar = false;
while ( !bGotNonChar && s<sDictWordEnd )
{
dCharOffset[iChars] = int ( s - (const BYTE *)sDictWord );
int iCode = sphUTF8Decode ( s );
if ( !bNonCharAllowed )
bGotNonChar = ( iCode<'A' || ( iCode>'Z' && iCode<'a' ) ); // skip words with any numbers or special characters
if_const ( !SINGLE_BYTE_CHAR )
{
dDictWordCodepoints[iChars] = iCode;
}
iChars++;
}
dCharOffset[iChars] = int ( s - (const BYTE *)sDictWord );
iDictCodepoints = iChars;
if_const ( !SINGLE_BYTE_CHAR )
{
if ( iDictCodepoints<=iMinWordLen || iDictCodepoints>=iMaxWordLen )
continue;
}
// skip word in case of non char symbol found
if ( bGotNonChar )
continue;
// FIXME!!! should we skip in such cases
// utf8 reference word != single byte dictionary word
// single byte reference word != utf8 dictionary word
bool bGotMatch = false;
for ( int iChar=0; iChar<=iDictCodepoints-iNGramLen && !bGotMatch; iChar++ )
{
int iStart = dCharOffset[iChar];
int iEnd = dCharOffset[iChar+iNGramLen];
bGotMatch = ( dHashTrigrams.Find ( sphCRC32 ( sDictWord + iStart, iEnd - iStart ) )!=NULL );
}
// skip word in case of no trigrams matched
if ( !bGotMatch )
continue;
int iDist = INT_MAX;
if_const ( SINGLE_BYTE_CHAR )
iDist = sphLevenshtein ( tRes.m_sWord.cstr(), tRes.m_iLen, sDictWord, iDictWordLen, dLevenshteinTmp );
else
iDist = sphLevenshtein ( tRes.m_dCodepoints, tRes.m_iCodepoints, dDictWordCodepoints, iDictCodepoints, dLevenshteinTmp );
// skip word in case of too many edits
if ( iDist>iMaxEdits )
continue;
SuggestWord_t tElem;
tElem.m_iNameOff = tRes.m_dBuf.GetLength();
tElem.m_iLen = iDictWordLen;
tElem.m_iDistance = iDist;
tElem.m_iDocs = tWord.m_iDocs;
// store in k-buffer up to 2*QLen words
if ( !iLastBad || fnCmp.IsLess ( tElem, tRes.m_dMatched[iLastBad] ) )
{
tElem.m_iNameHash = bMergeWords ? sphCRC32 ( sDictWord, iDictWordLen ) : 0;
tRes.m_dMatched.Add ( tElem );
BYTE * sWord = tRes.m_dBuf.AddN ( iDictWordLen+1 );
memcpy ( sWord, sDictWord, iDictWordLen );
sWord[iDictWordLen] = '\0';
iQueueRejected = 0;
bSorted = false;
} else
{
iQueueRejected++;
}
// sort k-buffer in case of threshold overflow
if ( tRes.m_dMatched.GetLength()>iQLen*2 )
{
if ( bMergeWords )
SuggestMergeDocs ( tRes.m_dMatched );
int iTotal = tRes.m_dMatched.GetLength();
tRes.m_dMatched.Sort ( CmpSuggestOrder_fn() );
bSorted = true;
// there might be fewer elements than necessary after the merge operation
if ( iTotal>iQLen )
{
iQueueRejected += iTotal - iQLen;
tRes.m_dMatched.Resize ( iQLen );
}
iLastBad = tRes.m_dMatched.GetLength()-1;
}
}
if ( sphInterrupted () )
break;
// stop dictionary unpacking in case queue rejects a lot of matched words
if ( iQueueRejected && iQueueRejected>iQLen*iRejectThr )
break;
}
// perform a final sort if any entries are still unsorted
if ( !bSorted )
{
if ( bMergeWords )
SuggestMergeDocs ( tRes.m_dMatched );
tRes.m_dMatched.Sort ( CmpSuggestOrder_fn() );
}
}
void sphGetSuggest ( const ISphWordlistSuggest * pWordlist, int iInfixCodepointBytes, const SuggestArgs_t & tArgs, SuggestResult_t & tRes )
{
assert ( pWordlist );
CSphVector<Slice_t> dCheckpoints;
SuggestGetChekpoints ( pWordlist, iInfixCodepointBytes, tRes.m_dTrigrams, dCheckpoints, tRes );
if ( !dCheckpoints.GetLength() )
return;
if ( tRes.m_bUtf8 )
SuggestMatchWords<false> ( pWordlist, dCheckpoints, tArgs, tRes );
else
SuggestMatchWords<true> ( pWordlist, dCheckpoints, tArgs, tRes );
if ( sphInterrupted() )
return;
tRes.Flattern ( tArgs.m_iLimit );
}
//////////////////////////////////////////////////////////////////////////
void IteratorStats_t::Merge ( const IteratorStats_t & tSrc )
{
m_iTotal += tSrc.m_iTotal;
for ( const auto & i : tSrc.m_dIterators )
{
bool bFound = false;
for ( auto & j : m_dIterators )
if ( i.m_sAttr==j.m_sAttr && i.m_sType==j.m_sType )
{
j.m_iUsed += i.m_iUsed;
bFound = true;
}
if ( !bFound )
m_dIterators.Add(i);
}
}
//////////////////////////////////////////////////////////////////////////
// CSphQueryResultMeta
//////////////////////////////////////////////////////////////////////////
void RemoveDictSpecials ( CSphString & sWord, bool bBigram )
{
if ( sWord.cstr()[0]==MAGIC_WORD_HEAD )
{
*const_cast<char *>( sWord.cstr() ) = '*';
} else if ( sWord.cstr()[0]==MAGIC_WORD_HEAD_NONSTEMMED )
{
*const_cast<char *>( sWord.cstr() ) = '=';
}
if ( bBigram )
{
const char * p = strchr ( sWord.cstr(), MAGIC_WORD_BIGRAM );
if ( p )
*const_cast<char *>(p) = ' ';
}
}
const CSphString & RemoveDictSpecials ( const CSphString & sWord, CSphString & sFixed, bool bBigram )
{
const CSphString * pFixed = &sWord;
if ( sWord.cstr()[0]==MAGIC_WORD_HEAD )
{
sFixed = sWord;
*const_cast<char *>( sFixed.cstr() ) = '*';
pFixed = &sFixed;
} else if ( sWord.cstr()[0]==MAGIC_WORD_HEAD_NONSTEMMED )
{
sFixed = sWord;
*const_cast<char *>( sFixed.cstr() ) = '=';
pFixed = &sFixed;
}
if ( bBigram )
{
const char * p = strchr ( sWord.cstr(), MAGIC_WORD_BIGRAM );
if ( p )
{
sFixed.SetSprintf ( "\"%s\"", sWord.cstr() );
*( const_cast<char *> ( sFixed.cstr() ) + ( p - sWord.cstr() ) + 1 ) = ' ';
pFixed = &sFixed;
}
}
return *pFixed;
}
void CSphQueryResultMeta::AddStat ( const CSphString & sWord, int64_t iDocs, int64_t iHits )
{
CSphString sBuf;
const CSphString & tFixed = RemoveDictSpecials ( sWord, sBuf, m_bBigram );
WordStat_t & tStats = m_hWordStats.AddUnique ( tFixed );
tStats.first += iDocs;
tStats.second += iHits;
}
void CSphQueryResultMeta::AddStat ( const ExpansionStats_t & tExpansionStats )
{
m_tExpansionStats.m_iTerms += tExpansionStats.m_iTerms;
m_tExpansionStats.m_iMerged += tExpansionStats.m_iMerged;
}
void CSphQueryResultMeta::MergeWordStats ( const CSphQueryResultMeta & tOther )
{
const auto & hOtherStats = tOther.m_hWordStats;
if ( !m_hWordStats.GetLength () )
{
// nothing has been set yet; just copy
m_hWordStats = hOtherStats;
} else
{
for ( auto & tSrc : hOtherStats )
{
WordStat_t & tDst = m_hWordStats.AddUnique ( tSrc.first );
tDst.first += tSrc.second.first;
tDst.second += tSrc.second.second;
}
}
AddStat ( tOther.m_tExpansionStats );
}
///< sort wordstat to achieve reproducable result over different runs
CSphFixedVector<SmallStringHash_T<CSphQueryResultMeta::WordStat_t>::KeyValue_t *> CSphQueryResultMeta::MakeSortedWordStat () const
{
using kv_t = SmallStringHash_T<WordStat_t>::KeyValue_t;
CSphFixedVector<kv_t*> dWords { m_hWordStats.GetLength() };
int i = 0;
for ( auto & tStat : m_hWordStats )
dWords[i++] = &tStat;
dWords.Sort ( Lesser ( [] ( kv_t * l, kv_t * r ) { return l->first<r->first; } ) );
return dWords;
}
//////////////////////////////////////////////////////////////////////////
CSphVector<const ISphSchema *> SorterSchemas ( const VecTraits_T<ISphMatchSorter *> & dSorters, int iSkipSorter )
{
CSphVector<const ISphSchema *> dSchemas;
if ( !dSorters.IsEmpty() )
{
dSchemas.Reserve ( dSorters.GetLength() - 1 );
ARRAY_FOREACH ( i, dSorters )
{
if ( i==iSkipSorter || !dSorters[i] )
continue;
const ISphSchema * pSchema = dSorters[i]->GetSchema();
dSchemas.Add ( pSchema );
}
}
return dSchemas;
}
std::pair<int, int> GetMaxSchemaIndexAndMatchCapacity ( const VecTraits_T<ISphMatchSorter*> & dSorters )
{
int iMaxSchemaSize = -1;
int iMaxSchemaIndex = -1;
int iMatchPoolSize = 0;
ARRAY_FOREACH ( i, dSorters )
{
iMatchPoolSize += dSorters[i]->GetMatchCapacity();
if ( dSorters[i]->GetSchema ()->GetAttrsCount ()>iMaxSchemaSize )
{
iMaxSchemaSize = dSorters[i]->GetSchema ()->GetAttrsCount ();
iMaxSchemaIndex = i;
}
}
return {iMaxSchemaIndex, iMatchPoolSize};
}
volatile int &sphGetTFO () noexcept
{
static int iTFO = 0;
return iTFO;
}
volatile bool& sphGetbCpuStat () noexcept
{
static bool bCpuStat = false;
return bCpuStat;
}
| 403,305 | C++ | .cpp | 10,398 | 35.926524 | 596 | 0.702158 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,884 | pollable_event.cpp | manticoresoftware_manticoresearch/src/pollable_event.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "pollable_event.h"
#include "searchdaemon.h"
#if HAVE_EVENTFD
#include <sys/eventfd.h>
#endif
#if !HAVE_EVENTFD
static bool CreateSocketPair ( int &iSock1, int &iSock2, CSphString &sError )
{
#if _WIN32
union {
struct sockaddr_in inaddr;
struct sockaddr addr;
} tAddr;
int iListen = (int)socket ( AF_INET, SOCK_STREAM, IPPROTO_TCP );
if ( iListen<0 )
{
sError.SetSprintf ( "failed to create listen socket: %s", sphSockError() );
return false;
}
memset ( &tAddr, 0, sizeof ( tAddr ) );
tAddr.inaddr.sin_family = AF_INET;
tAddr.inaddr.sin_addr.s_addr = htonl ( INADDR_LOOPBACK );
tAddr.inaddr.sin_port = 0;
auto tCloseListen = AtScopeExit ( [&iListen] { if ( iListen>=0 ) sphSockClose (iListen); } );
if ( bind ( iListen, &tAddr.addr, sizeof ( tAddr.inaddr ) )<0 )
{
sError.SetSprintf ( "failed to bind listen socket: %s", sphSockError() );
return false;
}
int iAddrBufLen = sizeof ( tAddr );
memset ( &tAddr, 0, sizeof ( tAddr ) );
if ( getsockname ( iListen, &tAddr.addr, &iAddrBufLen )<0 )
{
sError.SetSprintf ( "failed to get socket description: %s", sphSockError() );
return false;
}
tAddr.inaddr.sin_addr.s_addr = htonl ( INADDR_LOOPBACK );
tAddr.inaddr.sin_family = AF_INET;
if ( listen ( iListen, 5 )<0 )
{
sError.SetSprintf ( "failed to listen socket: %s", sphSockError() );
return false;
}
int iWrite = (int)socket ( AF_INET, SOCK_STREAM, 0 );
auto tCloseWrite = AtScopeExit ( [&iWrite] { if ( iWrite>=0 ) sphSockClose (iWrite); } );
if ( iWrite<0 )
{
sError.SetSprintf ( "failed to create write socket: %s", sphSockError() );
return false;
}
if ( connect ( iWrite, &tAddr.addr, sizeof(tAddr.addr) )<0 )
{
sError.SetSprintf ( "failed to connect to loopback: %s\n", sphSockError() );
return false;
}
int iRead = (int)accept ( iListen, NULL, NULL );
if ( iRead<0 )
{
sError.SetSprintf ( "failed to accept loopback: %s\n", sphSockError() );
}
iSock1 = iRead;
iSock2 = iWrite;
iWrite = -1; // protect from tCloseWrite
sphSetSockNodelay ( iSock2 );
#else
int dSockets[2] = { -1, -1 };
if ( socketpair ( AF_LOCAL, SOCK_STREAM, 0, dSockets )!=0 )
{
sError.SetSprintf ( "failed to create socketpair: %s", sphSockError () );
return false;
}
iSock1 = dSockets[0];
iSock2 = dSockets[1];
#endif
if ( sphSetSockNB ( iSock1 )<0 || sphSetSockNB ( iSock2 )<0 )
{
sError.SetSprintf ( "failed to set socket non-block: %s", sphSockError () );
SafeCloseSocket ( iSock1 );
SafeCloseSocket ( iSock2 );
return false;
}
return true;
}
#endif
PollableEvent_t::PollableEvent_t ()
{
int iRead = -1;
int iWrite = -1;
#if HAVE_EVENTFD
int iFD = eventfd ( 0, EFD_NONBLOCK );
if ( iFD==-1 )
m_sError.SetSprintf ( "failed to create eventfd: %s", strerrorm ( errno ) );
iRead = iWrite = iFD;
#else
CreateSocketPair ( iRead, iWrite, m_sError );
#endif
if ( iRead==-1 || iWrite==-1 )
sphWarning ( "PollableEvent_t create error:%s", m_sError.cstr () );
m_iPollablefd = iRead;
m_iSignalEvent = iWrite;
}
PollableEvent_t::~PollableEvent_t ()
{
Close ();
}
void PollableEvent_t::Close ()
{
SafeCloseSocket ( m_iPollablefd );
#if !HAVE_EVENTFD
SafeCloseSocket ( m_iSignalEvent );
#endif
}
int PollableEvent_t::PollableErrno ()
{
return sphSockGetErrno ();
}
bool PollableEvent_t::FireEvent () const
{
if ( m_iSignalEvent==-1 )
return true;
int iErrno = EAGAIN;
while ( iErrno==EAGAIN || iErrno==EWOULDBLOCK )
{
uint64_t uVal = 1;
#if HAVE_EVENTFD
int iPut = ::write ( m_iSignalEvent, &uVal, 8 );
#else
int iPut = sphSockSend ( m_iSignalEvent, ( const char * ) &uVal, 8 );
#endif
if ( iPut==8 )
return true;
iErrno = PollableErrno ();
};
return false;
}
// just wipe out a fired event to drain the queue; we don't need the value itself
void PollableEvent_t::DisposeEvent () const
{
assert ( m_iPollablefd!=-1 );
uint64_t uVal = 0;
while ( true )
{
#if HAVE_EVENTFD
auto iRead = ::read ( m_iPollablefd, &uVal, 8 );
if ( iRead==8 )
break;
#else
// the socket-pair case may have several values stacked up; read them all
int iRead = sphSockRecv ( m_iPollablefd, ( char * ) &uVal, 8 );
if ( iRead<=0 )
break;
#endif
}
}
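// A minimal usage sketch (illustrative, not part of the original file): one thread
// fires the event, another waits on the pollable fd and drains it. Assumes a POSIX
// poll() environment and that m_iPollablefd is publicly accessible, as the method
// definitions above suggest.
#if !_WIN32
#include <poll.h>
static void ExampleWaitOnEvent ( PollableEvent_t & tEvent )
{
pollfd tPoll { tEvent.m_iPollablefd, POLLIN, 0 };
if ( ::poll ( &tPoll, 1, -1 )>0 && ( tPoll.revents & POLLIN ) )
tEvent.DisposeEvent(); // drain it, or the fd stays readable forever
// any other thread wakes us up with: tEvent.FireEvent();
}
#endif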
| 4,642 | C++ | .cpp | 165 | 26.042424 | 95 | 0.688399 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,885 | attrindex_merge.cpp | manticoresoftware_manticoresearch/src/attrindex_merge.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "histogram.h"
#include "docidlookup.h"
#include "docstore.h"
#include "indexfiles.h"
#include "killlist.h"
#include "attrindex_builder.h"
#include "secondarylib.h"
#include "knnmisc.h"
#include "jsonsi.h"
#include "attrindex_merge.h"
#include "tracer.h"
#include <boost/preprocessor/repetition/repeat.hpp>
class AttrMerger_c::Impl_c
{
AttrIndexBuilder_c m_tMinMax;
HistogramContainer_c m_tHistograms;
CSphVector<PlainOrColumnar_t> m_dAttrsForHistogram;
std::unique_ptr<knn::Builder_i> m_pKNNBuilder;
std::unique_ptr<JsonSIBuilder_i> m_pJsonSIBuilder;
CSphVector<PlainOrColumnar_t> m_dAttrsForKNN;
CSphFixedVector<DocidRowidPair_t> m_dDocidLookup {0};
CSphWriter m_tWriterSPA;
std::unique_ptr<BlobRowBuilder_i> m_pBlobRowBuilder;
std::unique_ptr<DocstoreBuilder_i> m_pDocstoreBuilder;
std::unique_ptr<columnar::Builder_i> m_pColumnarBuilder;
RowID_t m_tResultRowID = 0;
int64_t m_iTotalBytes = 0;
MergeCb_c & m_tMonitor;
CSphString & m_sError;
int64_t m_iTotalDocs;
BuildBufferSettings_t m_tBufferSettings;
CSphVector<PlainOrColumnar_t> m_dSiAttrs;
std::unique_ptr<SI::Builder_i> m_pSIdxBuilder;
CSphVector<ESphExt> m_dCreatedFiles;
private:
template<bool WITH_BLOB, bool WITH_STRIDE, bool WITH_DOCSTORE, bool WITH_SI, bool WITH_KNN, bool PURE_COLUMNAR>
bool CopyMixedAttributes_T ( const CSphIndex & tIndex, const VecTraits_T<RowID_t>& dRowMap );
CSphString GetTmpFilename ( const CSphIndex* pIdx, ESphExt eExt )
{
m_dCreatedFiles.Add ( eExt );
assert ( pIdx );
return pIdx->GetTmpFilename ( eExt );
}
public:
Impl_c ( MergeCb_c & tMonitor, CSphString & sError, int64_t iTotalDocs, const BuildBufferSettings_t & tSettings )
: m_tMonitor ( tMonitor )
, m_sError ( sError )
, m_iTotalDocs ( iTotalDocs )
, m_tBufferSettings ( tSettings )
{}
bool Prepare ( const CSphIndex * pSrcIndex, const CSphIndex * pDstIndex );
bool CopyAttributes ( const CSphIndex & tIndex, const VecTraits_T<RowID_t>& dRowMap, DWORD uAlive );
bool FinishMergeAttributes ( const CSphIndex * pDstIndex, BuildHeader_t& tBuildHeader, StrVec_t* pCreatedFiles );
void AddCreatedFiles ( const CSphIndex * pDstIndex, StrVec_t * pCreatedFiles )
{
if ( pCreatedFiles )
m_dCreatedFiles.for_each ( [pCreatedFiles, pDstIndex] ( auto eExt ) { pCreatedFiles->Add ( pDstIndex->GetTmpFilename ( eExt ) ); } );
}
};
bool AttrMerger_c::Impl_c::Prepare ( const CSphIndex * pSrcIndex, const CSphIndex * pDstIndex )
{
auto sSPA = GetTmpFilename ( pDstIndex, SPH_EXT_SPA );
const CSphSchema & tDstSchema = pDstIndex->GetMatchSchema();
if ( tDstSchema.HasNonColumnarAttrs() && !m_tWriterSPA.OpenFile ( sSPA, m_sError ) )
return false;
if ( tDstSchema.HasBlobAttrs() )
{
m_pBlobRowBuilder = sphCreateBlobRowBuilder ( pSrcIndex->GetMatchSchema(), GetTmpFilename ( pDstIndex, SPH_EXT_SPB ), pSrcIndex->GetSettings().m_tBlobUpdateSpace, m_tBufferSettings.m_iBufferAttributes, m_sError );
if ( !m_pBlobRowBuilder )
return false;
}
if ( pDstIndex->GetDocstore() )
{
m_pDocstoreBuilder = CreateDocstoreBuilder ( GetTmpFilename ( pDstIndex, SPH_EXT_SPDS ), pDstIndex->GetDocstore()->GetDocstoreSettings(), m_tBufferSettings.m_iBufferStorage, m_sError );
if ( !m_pDocstoreBuilder )
return false;
for ( int i = 0; i < tDstSchema.GetFieldsCount(); ++i )
if ( tDstSchema.IsFieldStored(i) )
m_pDocstoreBuilder->AddField ( tDstSchema.GetFieldName(i), DOCSTORE_TEXT );
for ( int i = 0; i < tDstSchema.GetAttrsCount(); ++i )
if ( tDstSchema.IsAttrStored(i) )
m_pDocstoreBuilder->AddField ( tDstSchema.GetAttr(i).m_sName, DOCSTORE_ATTR );
}
if ( tDstSchema.HasColumnarAttrs() )
{
m_pColumnarBuilder = CreateColumnarBuilder ( tDstSchema, GetTmpFilename ( pDstIndex, SPH_EXT_SPC ), m_tBufferSettings.m_iBufferColumnar, m_sError );
if ( !m_pColumnarBuilder )
return false;
}
if ( tDstSchema.HasKNNAttrs() )
{
m_pKNNBuilder = BuildCreateKNN ( tDstSchema, m_iTotalDocs, m_dAttrsForKNN, m_sError );
if ( !m_pKNNBuilder )
return false;
}
if ( tDstSchema.HasJsonSIAttrs() )
{
m_pJsonSIBuilder = CreateJsonSIBuilder ( tDstSchema, pDstIndex->GetTmpFilename(SPH_EXT_SPB), GetTmpFilename ( pDstIndex, SPH_EXT_SPJIDX ), m_sError );
if ( !m_pJsonSIBuilder )
return false;
}
if ( IsSecondaryLibLoaded() )
{
m_pSIdxBuilder = CreateIndexBuilder ( m_tBufferSettings.m_iSIMemLimit, tDstSchema, GetTmpFilename ( pDstIndex, SPH_EXT_SPIDX ), m_dSiAttrs, m_tBufferSettings.m_iBufferStorage, m_sError );
if ( !m_pSIdxBuilder )
return false;
}
m_tMinMax.Init ( tDstSchema );
m_dDocidLookup.Reset ( m_iTotalDocs );
BuildCreateHistograms ( m_tHistograms, m_dAttrsForHistogram, tDstSchema );
m_tResultRowID = 0;
return true;
}
template <bool WITH_BLOB, bool WITH_STRIDE, bool WITH_DOCSTORE, bool WITH_SI, bool WITH_KNN, bool PURE_COLUMNAR>
bool AttrMerger_c::Impl_c::CopyMixedAttributes_T ( const CSphIndex & tIndex, const VecTraits_T<RowID_t>& dRowMap )
{
auto dColumnarIterators = CreateAllColumnarIterators ( tIndex.GetColumnar(), tIndex.GetMatchSchema() );
CSphVector<int64_t> dTmp;
int iColumnarIdLoc = PURE_COLUMNAR ? 0 : ( tIndex.GetMatchSchema ().GetAttr ( 0 ).IsColumnar () ? 0 : -1 );
const CSphRowitem * pRow = tIndex.GetRawAttrs ();
const BYTE * pRawBlobAttrs = PURE_COLUMNAR ? nullptr : tIndex.GetRawBlobAttrs ();
int iStride = tIndex.GetMatchSchema().GetRowSize();
CSphFixedVector<CSphRowitem> dTmpRow ( iStride );
auto iStrideBytes = dTmpRow.GetLengthBytes();
const CSphColumnInfo* pBlobLocator = WITH_BLOB ? tIndex.GetMatchSchema().GetAttr ( sphGetBlobLocatorName() ) : nullptr;
int iChunk = tIndex.m_iChunk;
m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_START, iChunk );
AT_SCOPE_EXIT ( [this, iChunk] { m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_FINISHED, iChunk ); } );
for ( RowID_t tRowID = 0, tRows = (RowID_t)dRowMap.GetLength64(); tRowID < tRows; ++tRowID, pRow += PURE_COLUMNAR ? 0 : iStride )
{
if ( dRowMap[tRowID] == INVALID_ROWID )
continue;
m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_PULSE, iChunk );
if ( m_tMonitor.NeedStop() )
return false;
// the caller guarantees the result rowid stays in range
assert ( m_tResultRowID != INVALID_ROWID );
if constexpr ( !PURE_COLUMNAR )
m_tMinMax.Collect ( pRow );
if constexpr ( WITH_BLOB )
{
const BYTE * pOldBlobRow = pRawBlobAttrs + sphGetRowAttr ( pRow, pBlobLocator->m_tLocator );
std::pair<SphOffset_t, SphOffset_t> tOffsetSize = m_pBlobRowBuilder->Flush ( pOldBlobRow );
if ( m_pJsonSIBuilder )
m_pJsonSIBuilder->AddRowOffsetSize ( tOffsetSize );
memcpy ( dTmpRow.Begin(), pRow, iStrideBytes );
sphSetRowAttr ( dTmpRow.Begin(), pBlobLocator->m_tLocator, tOffsetSize.first );
m_tWriterSPA.PutBytes ( dTmpRow.Begin(), iStrideBytes );
} else if constexpr ( WITH_STRIDE )
m_tWriterSPA.PutBytes ( pRow, iStrideBytes );
DocID_t tDocID = 0;
ARRAY_FOREACH ( i, dColumnarIterators )
{
auto & tIt = dColumnarIterators[i];
SphAttr_t tAttr = SetColumnarAttr ( i, tIt.second, m_pColumnarBuilder.get(), tIt.first, tRowID, dTmp );
if ( i==iColumnarIdLoc )
tDocID = tAttr;
}
if constexpr ( !PURE_COLUMNAR )
{
if ( iColumnarIdLoc < 0 )
tDocID = sphGetDocID(pRow);
}
if constexpr ( PURE_COLUMNAR )
{
assert ( !pRow );
assert ( !pRawBlobAttrs );
}
BuildStoreHistograms ( tRowID, pRow, pRawBlobAttrs, dColumnarIterators, m_dAttrsForHistogram, m_tHistograms );
if constexpr ( WITH_DOCSTORE )
m_pDocstoreBuilder->AddDoc ( m_tResultRowID, tIndex.GetDocstore()->GetDoc ( tRowID, nullptr, -1, false ) );
if constexpr ( WITH_SI )
{
m_pSIdxBuilder->SetRowID ( m_tResultRowID );
BuildStoreSI ( tRowID, pRow, tIndex.GetRawBlobAttrs(), dColumnarIterators, m_dSiAttrs, m_pSIdxBuilder.get(), dTmp );
}
if constexpr ( WITH_KNN )
if ( !BuildStoreKNN ( tRowID, pRow, tIndex.GetRawBlobAttrs(), dColumnarIterators, m_dAttrsForKNN, *m_pKNNBuilder ) )
{
m_sError = m_pKNNBuilder->GetError().c_str();
return false;
}
m_dDocidLookup[m_tResultRowID] = { tDocID, m_tResultRowID };
++m_tResultRowID;
}
return true;
}
bool AttrMerger_c::Impl_c::CopyAttributes ( const CSphIndex & tIndex, const VecTraits_T<RowID_t>& dRowMap, DWORD uAlive )
{
if ( !uAlive )
return true;
// this is purely empirical, but better than nothing.
m_iTotalBytes += tIndex.GetStats().m_iTotalBytes * ( (float)uAlive / (float)dRowMap.GetLength64() );
const bool bPureColumnar = !tIndex.GetRawAttrs ();
const bool bBlob = !bPureColumnar && !!m_pBlobRowBuilder;
const bool bStride = !bPureColumnar && tIndex.GetMatchSchema ().GetRowSize ()>0;
const bool bDocstore = !!m_pDocstoreBuilder;
const bool bSI = !!m_pSIdxBuilder;
const bool bKNN = !!m_pKNNBuilder;
int iIndex = bPureColumnar*32+bKNN*16+bSI*8+bDocstore*4+bStride*2+bBlob;
switch ( iIndex )
{
#define DECL_COPYMIX( _, n, params ) case n: return CopyMixedAttributes_T<!!(n&1), !!(n&2), !!(n&4), !!(n&8), !!(n&16), !!(n&32)> params;
BOOST_PP_REPEAT ( 64, DECL_COPYMIX, ( tIndex, dRowMap ) )
#undef DECL_COPYMIX
default:
assert ( 0 && "Internal error" );
break;
}
return false;
}
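// The dispatch above packs six runtime booleans into a 6-bit index, and
// BOOST_PP_REPEAT stamps out all 64 switch cases, so each flag becomes a
// compile-time template argument and the per-row 'if constexpr' branches cost
// nothing in the hot copy loop. A minimal standalone sketch of the same trick
// follows; ExampleWork_T/ExampleDispatch are illustrative names, not part of
// this codebase.
template<bool A, bool B>
static int ExampleWork_T()
{
return ( A ? 1 : 0 ) + ( B ? 2 : 0 ); // imagine feature-dependent hot-loop code here
}

static int ExampleDispatch ( bool bA, bool bB )
{
switch ( bA*2 + bB )
{
#define DECL_CASE( _, n, params ) case n: return ExampleWork_T<!!(n&2), !!(n&1)> params;
BOOST_PP_REPEAT ( 4, DECL_CASE, () )
#undef DECL_CASE
default: return -1;
}
}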
bool AttrMerger_c::Impl_c::FinishMergeAttributes ( const CSphIndex * pDstIndex, BuildHeader_t& tBuildHeader, StrVec_t* pCreatedFiles )
{
m_tMinMax.FinishCollect();
assert ( m_tResultRowID==m_iTotalDocs );
tBuildHeader.m_iDocinfo = m_iTotalDocs;
tBuildHeader.m_iTotalDocuments = m_iTotalDocs;
tBuildHeader.m_iTotalBytes = m_iTotalBytes;
m_dDocidLookup.Sort ( CmpDocidLookup_fn() );
if ( !WriteDocidLookup ( GetTmpFilename ( pDstIndex, SPH_EXT_SPT ), m_dDocidLookup, m_sError ) )
return false;
if ( pDstIndex->GetMatchSchema().HasNonColumnarAttrs() )
{
if ( m_iTotalDocs )
{
tBuildHeader.m_iMinMaxIndex = m_tWriterSPA.GetPos() / sizeof(CSphRowitem);
const auto& dMinMaxRows = m_tMinMax.GetCollected();
m_tWriterSPA.PutBytes ( dMinMaxRows.Begin(), dMinMaxRows.GetLengthBytes64() );
tBuildHeader.m_iDocinfoIndex = ( dMinMaxRows.GetLength() / pDstIndex->GetMatchSchema().GetRowSize() / 2 ) - 1;
}
m_tWriterSPA.CloseFile();
if ( m_tWriterSPA.IsError() )
return false;
}
if ( m_pBlobRowBuilder && !m_pBlobRowBuilder->Done(m_sError) )
return false;
std::string sErrorSTL;
if ( m_pColumnarBuilder && !m_pColumnarBuilder->Done(sErrorSTL) )
{
m_sError = sErrorSTL.c_str();
return false;
}
if ( !m_tHistograms.Save ( GetTmpFilename ( pDstIndex, SPH_EXT_SPHI ), m_sError ) )
return false;
if ( !CheckDocsCount ( m_tResultRowID, m_sError ) )
return false;
if ( m_pDocstoreBuilder )
m_pDocstoreBuilder->Finalize();
std::string sError;
if ( m_pSIdxBuilder && !m_pSIdxBuilder->Done ( sError ) )
{
m_sError = sError.c_str();
return false;
}
if ( m_pJsonSIBuilder && !m_pJsonSIBuilder->Done(m_sError) )
return false;
if ( m_pKNNBuilder && !m_pKNNBuilder->Save ( GetTmpFilename ( pDstIndex, SPH_EXT_SPKNN ).cstr(), m_tBufferSettings.m_iBufferStorage, sError ) )
{
m_sError = sError.c_str();
return false;
}
if ( !WriteDeadRowMap ( GetTmpFilename ( pDstIndex, SPH_EXT_SPM ), m_tResultRowID, m_sError ) )
return false;
return true;
}
AttrMerger_c::AttrMerger_c ( MergeCb_c& tMonitor, CSphString& sError, int64_t iTotalDocs, const BuildBufferSettings_t & tSettings )
: m_pImpl { std::make_unique<Impl_c> ( tMonitor, sError, iTotalDocs, tSettings ) }
{}
AttrMerger_c::~AttrMerger_c() = default;
bool AttrMerger_c::Prepare ( const CSphIndex* pSrcIndex, const CSphIndex* pDstIndex )
{
TRACE_CORO ( "sph", "AttrMerger_c::Prepare" );
return m_pImpl->Prepare ( pSrcIndex, pDstIndex );
}
bool AttrMerger_c::CopyAttributes ( const CSphIndex& tIndex, const VecTraits_T<RowID_t>& dRowMap, DWORD uAlive )
{
TRACE_CORO ( "sph", "AttrMerger_c::CopyAttributes" );
return m_pImpl->CopyAttributes ( tIndex, dRowMap, uAlive );
}
bool AttrMerger_c::FinishMergeAttributes ( const CSphIndex* pDstIndex, BuildHeader_t& tBuildHeader, StrVec_t* pCreatedFiles )
{
TRACE_CORO ( "sph", "AttrMerger_c::FinishMergeAttributes" );
bool bOk = m_pImpl->FinishMergeAttributes ( pDstIndex, tBuildHeader, pCreatedFiles );
m_pImpl->AddCreatedFiles ( pDstIndex, pCreatedFiles );
return bOk;
}
/////////////////////////////////////////////////////////////////////////////
class SiBuilder_c
{
public:
SiBuilder_c ( const CSphIndex & tIndex, MergeCb_c & tMonitor, int64_t iNumDocs, CSphString & sError );
bool Build();
StrVec_t GetOldFiles() const { return m_dOldFiles; }
StrVec_t GetNewFiles() const { return m_dNewFiles; }
private:
const CSphIndex & m_tIndex;
MergeCb_c & m_tMonitor;
int64_t m_iNumDocs;
CSphString & m_sError;
StrVec_t m_dOldFiles;
StrVec_t m_dNewFiles;
CSphVector<PlainOrColumnar_t> m_dSiAttrs;
std::unique_ptr<SI::Builder_i> m_pSIdxBuilder;
std::unique_ptr<JsonSIBuilder_i> m_pJsonSIBuilder;
bool ProcessPureColumnarAttributes();
bool ProcessMixedAttributes();
};
SiBuilder_c::SiBuilder_c ( const CSphIndex & tIndex, MergeCb_c & tMonitor, int64_t iNumDocs, CSphString & sError )
: m_tIndex ( tIndex )
, m_tMonitor ( tMonitor )
, m_iNumDocs ( iNumDocs )
, m_sError ( sError )
{}
bool SiBuilder_c::ProcessPureColumnarAttributes()
{
assert ( !m_tIndex.GetRawAttrs() );
assert ( m_tIndex.GetMatchSchema().GetAttr ( 0 ).IsColumnar() );
auto dColumnarIterators = CreateAllColumnarIterators ( m_tIndex.GetColumnar(), m_tIndex.GetMatchSchema() );
CSphVector<int64_t> dTmp;
int iChunk = m_tIndex.m_iChunk;
m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_START, iChunk );
AT_SCOPE_EXIT ( [this, iChunk] { m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_FINISHED, iChunk ); } );
for ( RowID_t tRowID = 0; tRowID<(RowID_t)m_iNumDocs; ++tRowID )
{
if ( m_tMonitor.NeedStop() )
return false;
m_pSIdxBuilder->SetRowID ( tRowID );
BuildStoreSI ( tRowID, nullptr, nullptr, dColumnarIterators, m_dSiAttrs, m_pSIdxBuilder.get(), dTmp );
}
return true;
}
bool SiBuilder_c::ProcessMixedAttributes()
{
auto dColumnarIterators = CreateAllColumnarIterators ( m_tIndex.GetColumnar(), m_tIndex.GetMatchSchema() );
CSphVector<int64_t> dTmp;
const CSphRowitem * pRow = m_tIndex.GetRawAttrs();
assert(pRow);
const BYTE * pBlobRow = m_tIndex.GetRawBlobAttrs();
const CSphSchema & tSchema = m_tIndex.GetMatchSchema();
const CSphColumnInfo * pBlobLoc = tSchema.GetAttr ( sphGetBlobLocatorName() );
int iStride = tSchema.GetRowSize();
int iNumBlobAttrs = 0;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
if ( tSchema.GetAttr(i).m_tLocator.IsBlobAttr() )
iNumBlobAttrs++;
int iChunk = m_tIndex.m_iChunk;
m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_START, iChunk );
AT_SCOPE_EXIT ( [this, iChunk] { m_tMonitor.SetEvent ( MergeCb_c::E_MERGEATTRS_FINISHED, iChunk ); } );
for ( RowID_t tRowID = 0; tRowID<(RowID_t)m_iNumDocs; ++tRowID, pRow += iStride )
{
if ( m_tMonitor.NeedStop() )
return false;
m_pSIdxBuilder->SetRowID(tRowID);
BuildStoreSI ( tRowID, pRow, m_tIndex.GetRawBlobAttrs(), dColumnarIterators, m_dSiAttrs, m_pSIdxBuilder.get(), dTmp );
if ( m_pJsonSIBuilder )
{
assert ( pBlobRow && pBlobLoc );
SphAttr_t tBlobRowOffset = sphGetRowAttr ( pRow, pBlobLoc->m_tLocator );
m_pJsonSIBuilder->AddRowOffsetSize ( { tBlobRowOffset, sphGetBlobTotalLen ( pBlobRow+tBlobRowOffset, iNumBlobAttrs ) } );
}
}
return true;
}
bool SiBuilder_c::Build()
{
if ( !IsSecondaryLibLoaded() )
{
m_sError = "secondary index library not loaded";
return false;
}
CSphString sSPIDX = m_tIndex.GetTmpFilename ( SPH_EXT_SPIDX );
CSphString sSPJIDX = m_tIndex.GetTmpFilename ( SPH_EXT_SPJIDX );
m_dNewFiles.Add(sSPIDX);
m_dOldFiles.Add ( m_tIndex.GetFilename ( SPH_EXT_SPIDX ) );
BuildBufferSettings_t tSettings; // use default buffer settings
m_pSIdxBuilder = CreateIndexBuilder ( tSettings.m_iSIMemLimit, m_tIndex.GetMatchSchema(), sSPIDX, m_dSiAttrs, tSettings.m_iBufferStorage, m_sError );
if ( !m_pSIdxBuilder )
return false;
if ( m_tIndex.GetMatchSchema().HasJsonSIAttrs() )
{
m_dNewFiles.Add(sSPJIDX);
m_dOldFiles.Add ( m_tIndex.GetFilename ( SPH_EXT_SPJIDX ) );
m_pJsonSIBuilder = CreateJsonSIBuilder ( m_tIndex.GetMatchSchema(), m_tIndex.GetFilename(SPH_EXT_SPB), sSPJIDX, m_sError );
if ( !m_pJsonSIBuilder )
return false;
}
bool bOk = false;
if ( !m_tIndex.GetRawAttrs() )
bOk = ProcessPureColumnarAttributes();
else
bOk = ProcessMixedAttributes();
if ( !bOk )
return false;
std::string sError;
if ( !m_pSIdxBuilder->Done(sError) )
{
m_sError = sError.c_str();
return false;
}
if ( m_pJsonSIBuilder && !m_pJsonSIBuilder->Done(m_sError) )
return false;
return true;
}
bool SiRecreate ( MergeCb_c & tMonitor, const CSphIndex & tIndex, int64_t iNumDocs, StrVec_t & dOldFiles, StrVec_t & dNewFiles, CSphString & sError )
{
SiBuilder_c tBuilder ( tIndex, tMonitor, iNumDocs, sError );
if ( !tBuilder.Build() )
return false;
dOldFiles = tBuilder.GetOldFiles();
dNewFiles = tBuilder.GetNewFiles();
return true;
}
| 17,481 | C++ | .cpp | 423 | 38.780142 | 215 | 0.726849 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,886 | searchdtask.cpp | manticoresoftware_manticoresearch/src/searchdtask.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdtask.h"
#include "coroutine.h"
#include "mini_timer.h"
#ifndef VERBOSE_TASKMANAGER
#define VERBOSE_TASKMANAGER 0
#endif
#if VERBOSE_TASKMANAGER
#define LOG_LEVEL_TSK true
#else
#define LOG_LEVEL_TSK false
#endif
#define LOG_COMPONENT_TSKX "X "
#define INFOX LOGMSG ( INFO, TSK, TSKX )
#define DEBUGX LOGMSG ( DEBUG, TSK, TSKX )
#define WARNX LOGMSG ( WARNING, TSK, TSKX )
// Max num of task flavours (we allocate fixed vec of this size)
// since we have only 7 different tasks for now, a pool of 32 slots seems to be enough
constexpr int NUM_TASKS = 32;
//////////////////////////////////////////////////////////////////////////
// Tasks (job classes)
//////////////////////////////////////////////////////////////////////////
static TaskManager::TaskInfo_t g_Tasks [ NUM_TASKS ];
static std::atomic<int> g_iTasks {0};
// wrap naked executor into statistic collector
Threads::Handler AttachClass ( TaskID iTask, Threads::Handler&& fnWorker )
{
return [iTask, fnWorker=std::move(fnWorker)] () {
Threads::JobTracker_t dTrack;
auto& tInfo = g_Tasks[iTask];
INFOX << "Task " << tInfo.m_sName << " started";
tInfo.m_iCurrentRunners.fetch_add ( 1, std::memory_order_relaxed );
auto itmStart = sphMicroTimer();
std::atomic_thread_fence ( std::memory_order_acquire );
fnWorker();
std::atomic_thread_fence ( std::memory_order_release );
auto itmEnd = sphMicroTimer();
tInfo.m_iCurrentRunners.fetch_sub ( 1, std::memory_order_relaxed );
tInfo.m_iAllRunners.fetch_sub ( 1, std::memory_order_relaxed );
tInfo.m_iTotalRun.fetch_add ( 1, std::memory_order_relaxed );
tInfo.m_iLastFinished.store ( itmEnd, std::memory_order_relaxed );
tInfo.m_iTotalSpent.fetch_add ( itmEnd - itmStart, std::memory_order_relaxed );
INFOX << "Task " << tInfo.m_sName << " finished";
};
}
void TaskManager::StartJob ( TaskID iTask, Threads::Handler fnJob )
{
assert ( iTask <= g_iTasks.load ( std::memory_order_relaxed ) && iTask >= 0 );
auto& tInfo = g_Tasks[iTask];
auto iAllRunners = tInfo.m_iAllRunners.load ( std::memory_order_relaxed );
if ( sphInterrupted() )
{
INFOX << "Drop job (id=" << iTask << " \"" << tInfo.m_sName << "\"), since interrupted";
tInfo.m_iTotalDropped.fetch_add ( 1, std::memory_order_relaxed );
return;
}
if ( tInfo.m_iMaxRunners > 0 && iAllRunners >= tInfo.m_iMaxRunners )
{
INFOX << "Drop job (id=" << iTask << " \"" << tInfo.m_sName << "\"), since " << iAllRunners << " is running/enqueued";
tInfo.m_iTotalDropped.fetch_add ( 1, std::memory_order_relaxed );
return;
}
INFOX << "StartJob (id=" << iTask << " \"" << tInfo.m_sName << "\")";
tInfo.m_iAllRunners.fetch_add ( 1, std::memory_order_relaxed );
Threads::StartJob ( AttachClass ( iTask, std::move ( fnJob ) ) );
}
TaskID TaskManager::RegisterGlobal ( CSphString sName, int iThreads )
{
auto iTaskID = TaskID ( g_iTasks.fetch_add ( 1, std::memory_order_relaxed ) );
if ( !iTaskID ) // this is the first task class; start the log timer
TimePrefixed::TimeStart();
INFOX << "Task \"" << sName << "\" registered with id=" << iTaskID << ", running max " << iThreads << " jobs a time" << (iThreads?"":" (0=unlimited)");
auto& dInfo = g_Tasks[iTaskID];
dInfo.m_sName = std::move ( sName );
dInfo.m_iMaxRunners = iThreads;
return iTaskID;
}
void TaskManager::ScheduleJob ( TaskID iTask, int64_t iTimeStampUS, Threads::Handler fnJob )
{
INFOX << "ScheduleJob (id=" << iTask << ", \"" << g_Tasks[iTask].m_sName << "\", start " << timestamp_t ( iTimeStampUS ) << ")";
assert ( iTimeStampUS > 0 );
auto pTimer = new MiniTimer_c ( g_Tasks[iTask].m_sName.cstr() );
pTimer->EngageAt ( iTimeStampUS, [pTimer, iTask, fnJob = std::move ( fnJob )]() mutable { TaskManager::StartJob ( iTask, std::move ( fnJob ) ); delete pTimer; } );
}
VecTraits_T<TaskManager::TaskInfo_t> TaskManager::GetTaskInfo ()
{
return { g_Tasks, g_iTasks.load ( std::memory_order_relaxed ) };
}
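// A minimal usage sketch (illustrative names; not part of the original file).
// Assumes Threads::Handler accepts a plain lambda, as the calls above suggest.
static void ExampleUseTaskManager()
{
static TaskID iTask = TaskManager::RegisterGlobal ( "example task", 1 ); // at most 1 running/enqueued job
TaskManager::StartJob ( iTask, [] { /* periodic work goes here */ } );
// or run it ~5 seconds from now:
TaskManager::ScheduleJob ( iTask, sphMicroTimer()+5*1000000, [] { /* deferred work */ } );
}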
| 4,372 | C++ | .cpp | 98 | 42.72449 | 164 | 0.67113 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,887 | sphinxquery.cpp | manticoresoftware_manticoresearch/src/sphinxquery.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxquery.h"
#include "sphinxutils.h"
#include "sphinxplugin.h"
#include <stdarg.h>
#include "tokenizer/tokenizer.h"
#include "dict/dict_base.h"
//////////////////////////////////////////////////////////////////////////
// EXTENDED PARSER RELOADED
//////////////////////////////////////////////////////////////////////////
class XQParser_t;
#include "bissphinxquery.h"
// #define XQDEBUG 1
// #define XQ_DUMP_TRANSFORMED_TREE 1
// #define XQ_DUMP_NODE_ADDR 1
static bool g_bOnlyNotAllowed = false;
void AllowOnlyNot ( bool bAllowed )
{
g_bOnlyNotAllowed = bAllowed;
}
bool IsAllowOnlyNot()
{
return g_bOnlyNotAllowed;
}
NodeEstimate_t & NodeEstimate_t::operator+= ( const NodeEstimate_t & tRhs )
{
m_fCost += tRhs.m_fCost;
m_iDocs += tRhs.m_iDocs;
m_iTerms += tRhs.m_iTerms;
return *this;
}
static void FixupMorphOnlyFields ( XQNode_t * pNode, const CSphBitvec * pMorphFields );
//////////////////////////////////////////////////////////////////////////
void XQParseHelper_c::SetString ( const char * szString )
{
assert ( m_pTokenizer );
m_pTokenizer->SetBuffer ( (const BYTE*)szString, (int) strlen(szString) );
m_iAtomPos = 0;
}
/// lookup field and add it into mask
bool XQParseHelper_c::AddField ( FieldMask_t & dFields, const char * szField, int iLen )
{
if ( !szField || !iLen )
return Error ( "empty field passed to AddField()" );
CSphString sField;
sField.SetBinary ( szField, iLen );
int iField = m_pSchema->GetFieldIndex ( sField.cstr () );
if ( iField<0 )
{
if ( m_bStopOnInvalid )
return Error ( "no field '%s' found in schema", sField.cstr () );
else
Warning ( "no field '%s' found in schema", sField.cstr () );
} else
{
if ( iField>=SPH_MAX_FIELDS )
return Error ( " max %d fields allowed", SPH_MAX_FIELDS );
dFields.Set ( iField );
}
return true;
}
/// parse fields block
bool XQParseHelper_c::ParseFields ( FieldMask_t & dFields, int & iMaxFieldPos, bool & bIgnore )
{
dFields.UnsetAll();
iMaxFieldPos = 0;
bIgnore = false;
const char * pPtr = m_pTokenizer->GetBufferPtr ();
const char * pLastPtr = m_pTokenizer->GetBufferEnd ();
if ( pPtr==pLastPtr )
return true; // silently ignore trailing field operator
bool bNegate = false;
bool bBlock = false;
// handle special modifiers
if ( *pPtr=='!' )
{
// handle @! and @!(
bNegate = true; ++pPtr;
if ( *pPtr=='(' ) { bBlock = true; ++pPtr; }
} else if ( *pPtr=='*' )
{
// handle @*
dFields.SetAll();
m_pTokenizer->SetBufferPtr ( pPtr+1 );
return true;
} else if ( HandleSpecialFields ( pPtr, dFields ) )
return true;
else
bBlock = HandleFieldBlockStart ( pPtr );
// handle invalid chars
if ( !sphIsAlpha(*pPtr) )
{
bIgnore = true;
m_pTokenizer->SetBufferPtr ( pPtr ); // ignore and re-parse (FIXME! maybe warn?)
return true;
}
assert ( sphIsAlpha(*pPtr) ); // i think i'm paranoid
// handle field specification
if ( !bBlock )
{
// handle standalone field specification
const char * pFieldStart = pPtr;
while ( sphIsAlpha(*pPtr) && pPtr<pLastPtr )
++pPtr;
assert ( pPtr-pFieldStart>0 );
if ( !AddField ( dFields, pFieldStart, int ( pPtr-pFieldStart ) ) )
return false;
m_pTokenizer->SetBufferPtr ( pPtr );
if ( bNegate )
dFields.Negate();
} else
{
// handle fields block specification
assert ( sphIsAlpha(*pPtr) && bBlock ); // and complicated
bool bOK = false;
const char * pFieldStart = nullptr;
while ( pPtr<pLastPtr )
{
// accumulate field name, while we can
if ( sphIsAlpha(*pPtr) || *pPtr=='.' )
{
if ( !pFieldStart )
pFieldStart = pPtr;
++pPtr;
continue;
}
// separator found
if ( !pFieldStart )
{
CSphString sContext;
sContext.SetBinary ( pPtr, (int)( pLastPtr-pPtr ) );
return Error ( "error parsing field list: invalid field block operator syntax near '%s'", sContext.cstr() );
} else if ( *pPtr==',' )
{
if ( !AddField ( dFields, pFieldStart, int ( pPtr-pFieldStart ) ) )
return false;
pFieldStart = nullptr;
pPtr++;
} else if ( *pPtr==')' )
{
if ( !AddField ( dFields, pFieldStart, int ( pPtr-pFieldStart ) ) )
return false;
m_pTokenizer->SetBufferPtr ( ++pPtr );
if ( bNegate )
dFields.Negate();
bOK = true;
break;
} else
{
return Error ( "error parsing field list: invalid character '%c' in field block operator", *pPtr );
}
}
if ( !bOK )
{
if ( NeedTrailingSeparator() )
return Error ( "error parsing field list: missing closing ')' in field block operator" );
else
{
if ( !AddField ( dFields, pFieldStart, int ( pPtr-pFieldStart ) ) )
return false;
if ( bNegate )
dFields.Negate();
return true;
}
}
}
// handle optional position range modifier
if ( pPtr[0]=='[' && isdigit ( pPtr[1] ) )
{
// skip '[' and digits
const char * p = pPtr+1;
while ( *p && isdigit(*p) ) p++;
// check that the range ends with ']' (FIXME! maybe report an error if it does not?)
if ( *p!=']' )
return true;
// fetch my value
iMaxFieldPos = strtoul ( pPtr+1, NULL, 10 );
m_pTokenizer->SetBufferPtr ( p+1 );
}
// well done
return true;
}
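// Accepted field-operator forms handled above (examples):
// @title hello -- a single field
// @!title hello -- all fields except 'title'
// @(title,body) hello -- a field block
// @!(title,body) hello -- a negated field block
// @* hello -- all fields
// @title[50] hello -- only the first 50 positions of 'title'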
void XQParseHelper_c::Setup ( const CSphSchema * pSchema, TokenizerRefPtr_c pTokenizer, DictRefPtr_c pDict, XQQuery_t * pXQQuery, const CSphIndexSettings & tSettings )
{
m_pSchema = pSchema;
m_pTokenizer = std::move ( pTokenizer );
m_pDict = std::move (pDict);
m_pParsed = pXQQuery;
m_iAtomPos = 0;
m_bEmptyStopword = ( tSettings.m_iStopwordStep==0 );
}
bool XQParseHelper_c::Error ( const char * sTemplate, ... )
{
assert ( m_pParsed );
char sBuf[256];
const char * sPrefix = "query error: ";
auto iPrefix = strlen(sPrefix);
memcpy ( sBuf, sPrefix, iPrefix );
va_list ap;
va_start ( ap, sTemplate );
vsnprintf ( sBuf+iPrefix, sizeof(sBuf)-iPrefix, sTemplate, ap );
va_end ( ap );
m_bError = true;
m_pParsed->m_sParseError = sBuf;
return false;
}
void XQParseHelper_c::Warning ( const char * sTemplate, ... )
{
assert ( m_pParsed );
char sBuf[256];
const char * sPrefix = "query warning: ";
auto iPrefix = strlen(sPrefix);
memcpy ( sBuf, sPrefix, iPrefix );
va_list ap;
va_start ( ap, sTemplate );
vsnprintf ( sBuf+iPrefix, sizeof(sBuf)-iPrefix, sTemplate, ap );
va_end ( ap );
m_pParsed->m_sParseWarning = sBuf;
}
void XQParseHelper_c::Cleanup()
{
m_dSpawned.Uniq(); // FIXME! should eliminate this by testing
ARRAY_FOREACH ( i, m_dSpawned )
{
m_dSpawned[i]->m_dChildren.Reset ();
SafeDelete ( m_dSpawned[i] );
}
m_dSpawned.Reset ();
}
bool XQParseHelper_c::CheckQuorumProximity ( XQNode_t * pNode )
{
if ( !pNode )
return true;
bool bQuorumPassed = ( pNode->GetOp()!=SPH_QUERY_QUORUM ||
( pNode->m_iOpArg>0 && ( !pNode->m_bPercentOp || pNode->m_iOpArg<=100 ) ) );
if ( !bQuorumPassed )
{
if ( pNode->m_bPercentOp )
return Error ( "quorum threshold out of bounds 0.0 and 1.0f (%f)", 1.0f / 100.0f * pNode->m_iOpArg );
else
return Error ( "quorum threshold too low (%d)", pNode->m_iOpArg );
}
if ( pNode->GetOp()==SPH_QUERY_PROXIMITY && pNode->m_iOpArg<1 )
return Error ( "proximity threshold too low (%d)", pNode->m_iOpArg );
return pNode->m_dChildren.all_of ( [&] ( XQNode_t * pChild ) { return CheckQuorumProximity(pChild); } );
}
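// The checks above guard operator arguments coming from the query syntax, e.g.:
// "the world is a wonderful place"/3 -- quorum: at least 3 of the words must match
// "the world is a wonderful place"/0.5 -- percent quorum: at least half of them
// "hello world"~10 -- proximity: words within 10 positions of each other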
static void FixupDegenerates ( XQNode_t * pNode, CSphString & sWarning )
{
if ( !pNode )
return;
if ( pNode->m_dWords.GetLength()==1 &&
( pNode->GetOp()==SPH_QUERY_PHRASE || pNode->GetOp()==SPH_QUERY_PROXIMITY || pNode->GetOp()==SPH_QUERY_QUORUM ) )
{
if ( pNode->GetOp()==SPH_QUERY_QUORUM && !pNode->m_bPercentOp && pNode->m_iOpArg>1 )
sWarning.SetSprintf ( "quorum threshold too high (words=%d, thresh=%d); replacing quorum operator with AND operator", pNode->m_dWords.GetLength(), pNode->m_iOpArg );
pNode->SetOp ( SPH_QUERY_AND );
return;
}
ARRAY_FOREACH ( i, pNode->m_dChildren )
FixupDegenerates ( pNode->m_dChildren[i], sWarning );
}
static XQNode_t * TransformOnlyNot ( XQNode_t * pNode, CSphVector<XQNode_t *> & dSpawned )
{
XQNode_t * pScan = new XQNode_t ( pNode->m_dSpec );
pScan->SetOp ( SPH_QUERY_SCAN );
dSpawned.Add ( pScan );
XQNode_t * pAnd = new XQNode_t ( pNode->m_dSpec );
pAnd->SetOp ( SPH_QUERY_ANDNOT, pScan, pNode );
dSpawned.Add ( pAnd ); // register the new ANDNOT root (was adding pScan twice, leaking pAnd on Cleanup)
return pAnd;
}
static XQNode_t * FixupNot ( XQNode_t * pNode, CSphVector<XQNode_t *> & dSpawned )
{
pNode->SetOp ( SPH_QUERY_AND );
return TransformOnlyNot ( pNode, dSpawned );
}
XQNode_t * XQParseHelper_c::FixupTree ( XQNode_t * pRoot, const XQLimitSpec_t & tLimitSpec, const CSphBitvec * pMorphFields, bool bOnlyNotAllowed )
{
FixupDestForms ();
DeleteNodesWOFields ( pRoot );
pRoot = SweepNulls ( pRoot, bOnlyNotAllowed );
FixupDegenerates ( pRoot, m_pParsed->m_sParseWarning );
FixupMorphOnlyFields ( pRoot, pMorphFields );
FixupNulls ( pRoot );
if ( !FixupNots ( pRoot, bOnlyNotAllowed, &pRoot ) )
{
Cleanup ();
return NULL;
}
if ( !CheckQuorumProximity ( pRoot ) )
{
Cleanup();
return NULL;
}
if ( pRoot && pRoot->GetOp()==SPH_QUERY_NOT )
{
if ( bOnlyNotAllowed )
{
pRoot = FixupNot ( pRoot, m_dSpawned );
} else if ( !pRoot->m_iOpArg )
{
Cleanup();
Error ( "query is non-computable (single NOT operator)" );
return NULL;
}
}
// all ok; might want to create a dummy node to indicate that
m_dSpawned.Reset();
if ( pRoot && pRoot->GetOp()==SPH_QUERY_NULL )
SafeDelete ( pRoot );
return pRoot ? pRoot : new XQNode_t ( tLimitSpec );
}
XQNode_t * XQParseHelper_c::SweepNulls ( XQNode_t * pNode, bool bOnlyNotAllowed )
{
if ( !pNode )
return NULL;
// sweep plain node
if ( pNode->m_dWords.GetLength() )
{
ARRAY_FOREACH ( i, pNode->m_dWords )
if ( pNode->m_dWords[i].m_sWord.cstr()==NULL )
pNode->m_dWords.Remove ( i-- );
if ( pNode->m_dWords.GetLength()==0 )
{
m_dSpawned.RemoveValue ( pNode ); // OPTIMIZE!
SafeDelete ( pNode );
return NULL;
}
return pNode;
}
// sweep op node
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
pNode->m_dChildren[i] = SweepNulls ( pNode->m_dChildren[i], bOnlyNotAllowed );
if ( pNode->m_dChildren[i]==NULL )
{
pNode->m_dChildren.Remove ( i-- );
// use a non-zero iOpArg as a flag indicating that the sweeping happened.
++pNode->m_iOpArg;
}
}
if ( pNode->m_dChildren.GetLength()==0 )
{
m_dSpawned.RemoveValue ( pNode ); // OPTIMIZE!
SafeDelete ( pNode );
return NULL;
}
// remove redundancies if needed
if ( pNode->GetOp()!=SPH_QUERY_NOT && pNode->m_dChildren.GetLength()==1 )
{
XQNode_t * pRet = pNode->m_dChildren[0];
pNode->m_dChildren.Reset ();
pRet->m_pParent = pNode->m_pParent;
// expressions like 'la !word' (with min_word_len>len('la')) degenerate into a 'null' node.
if ( pNode->m_iOpArg && pRet->GetOp()==SPH_QUERY_NOT && !bOnlyNotAllowed )
{
pRet->SetOp ( SPH_QUERY_NULL );
ARRAY_FOREACH ( i, pRet->m_dChildren )
{
m_dSpawned.RemoveValue ( pRet->m_dChildren[i] );
SafeDelete ( pRet->m_dChildren[i] );
}
pRet->m_dChildren.Reset();
}
pRet->m_iOpArg = pNode->m_iOpArg;
m_dSpawned.RemoveValue ( pNode ); // OPTIMIZE!
SafeDelete ( pNode );
return SweepNulls ( pRet, bOnlyNotAllowed );
}
// done
return pNode;
}
void XQParseHelper_c::FixupNulls ( XQNode_t * pNode )
{
if ( !pNode )
return;
ARRAY_FOREACH ( i, pNode->m_dChildren )
FixupNulls ( pNode->m_dChildren[i] );
// smth OR null == smth.
if ( pNode->GetOp()==SPH_QUERY_OR )
{
CSphVector<XQNode_t*> dNotNulls;
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
XQNode_t* pChild = pNode->m_dChildren[i];
if ( pChild->GetOp()!=SPH_QUERY_NULL )
dNotNulls.Add ( pChild );
else
{
m_dSpawned.RemoveValue ( pChild );
SafeDelete ( pChild );
}
}
pNode->m_dChildren.SwapData ( dNotNulls );
dNotNulls.Reset();
// smth AND null = null.
} else if ( pNode->GetOp()==SPH_QUERY_AND )
{
if ( pNode->m_dChildren.any_of (
[] ( XQNode_t * pChild ) { return pChild->GetOp ()==SPH_QUERY_NULL; } ) )
{
pNode->SetOp ( SPH_QUERY_NULL );
for ( auto &pChild : pNode->m_dChildren )
{
m_dSpawned.RemoveValue ( pChild );
SafeDelete ( pChild );
}
pNode->m_dChildren.Reset ();
}
}
}
bool XQParseHelper_c::FixupNots ( XQNode_t * pNode, bool bOnlyNotAllowed, XQNode_t ** ppRoot )
{
// no processing for plain nodes
if ( !pNode || !pNode->m_dWords.IsEmpty() )
return true;
// query was already transformed
if ( pNode->GetOp()==SPH_QUERY_ANDNOT )
return true;
// process 'em children
for ( auto& dNode : pNode->m_dChildren )
if ( !FixupNots ( dNode, bOnlyNotAllowed, ppRoot ) )
return false;
// extract NOT subnodes
CSphVector<XQNode_t*> dNots;
ARRAY_FOREACH ( i, pNode->m_dChildren )
if ( pNode->m_dChildren[i]->GetOp()==SPH_QUERY_NOT )
{
dNots.Add ( pNode->m_dChildren[i] );
pNode->m_dChildren.RemoveFast ( i-- );
}
// no NOTs? we're square
if ( !dNots.GetLength() )
return true;
// nothing but NOTs? we can't compute that
if ( !pNode->m_dChildren.GetLength() && !bOnlyNotAllowed )
return Error ( "query is non-computable (node consists of NOT operators only)" );
// NOT within OR or MAYBE? we can't compute that
if ( pNode->GetOp()==SPH_QUERY_OR || pNode->GetOp()==SPH_QUERY_MAYBE || pNode->GetOp()==SPH_QUERY_NEAR )
{
XQOperator_e eOp = pNode->GetOp();
const char * sOp = ( eOp==SPH_QUERY_OR ? "OR" : ( eOp==SPH_QUERY_MAYBE ? "MAYBE" : "NEAR" ) );
return Error ( "query is non-computable (NOT is not allowed within %s)", sOp );
}
// NOT used in before operator
if ( pNode->GetOp()==SPH_QUERY_BEFORE )
return Error ( "query is non-computable (NOT cannot be used as before operand)" );
// must be some NOTs within AND at this point, convert this node to ANDNOT
assert ( ( ( pNode->GetOp()==SPH_QUERY_AND && pNode->m_dChildren.GetLength() )
|| ( pNode->GetOp()==SPH_QUERY_AND && !pNode->m_dChildren.GetLength() && bOnlyNotAllowed ) )
&& dNots.GetLength() );
if ( pNode->GetOp()==SPH_QUERY_AND && !pNode->m_dChildren.GetLength() )
{
if ( !bOnlyNotAllowed )
return Error ( "query is non-computable (node consists of NOT operators only)" );
if ( !pNode->m_pParent )
{
pNode->SetOp ( SPH_QUERY_OR, dNots );
*ppRoot = TransformOnlyNot ( pNode, m_dSpawned );
return true;
} else if ( pNode->m_pParent && pNode->m_pParent->GetOp()==SPH_QUERY_AND && pNode->m_pParent->m_dChildren.GetLength()==2 )
{
pNode->m_pParent->SetOp ( SPH_QUERY_ANDNOT );
pNode->SetOp ( SPH_QUERY_OR, dNots );
return true;
} else
{
return Error ( "query is non-computable (node consists of NOT operators only)" );
}
}
XQNode_t * pAnd = new XQNode_t ( pNode->m_dSpec );
pAnd->SetOp ( SPH_QUERY_AND, pNode->m_dChildren );
m_dSpawned.Add ( pAnd );
XQNode_t * pNot = NULL;
if ( dNots.GetLength()==1 )
{
pNot = dNots[0];
} else
{
pNot = new XQNode_t ( pNode->m_dSpec );
pNot->SetOp ( SPH_QUERY_OR, dNots );
m_dSpawned.Add ( pNot );
}
pNode->SetOp ( SPH_QUERY_ANDNOT, pAnd, pNot );
return true;
}
static void CollectChildren ( XQNode_t * pNode, CSphVector<XQNode_t *> & dChildren )
{
if ( pNode->m_dChildren.IsEmpty() )
return;
dChildren.Add ( pNode );
// index-based BFS: dChildren grows as we walk it, and a range-for would both miss
// the appended nodes and risk iterator invalidation when the vector reallocates
for ( int i = 0; i < dChildren.GetLength(); ++i )
for ( const auto & dChild : dChildren[i]->m_dChildren )
dChildren.Add ( dChild );
}
void XQParseHelper_c::DeleteNodesWOFields ( XQNode_t * pNode )
{
if ( !pNode )
return;
for ( int i = 0; i < pNode->m_dChildren.GetLength (); )
{
if ( pNode->m_dChildren[i]->m_dSpec.m_dFieldMask.TestAll ( false ) )
{
XQNode_t * pChild = pNode->m_dChildren[i];
CSphVector<XQNode_t *> dChildren;
CollectChildren ( pChild, dChildren );
#ifndef NDEBUG
bool bAllEmpty = dChildren.all_of ( [] ( XQNode_t * pChildNode ) { return pChildNode->m_dSpec.m_dFieldMask.TestAll ( false ); } );
assert ( pChild->m_dChildren.GetLength()==0 || ( dChildren.GetLength() && bAllEmpty ) );
#endif
if ( dChildren.GetLength() )
{
ARRAY_FOREACH ( iChild, dChildren )
m_dSpawned.RemoveValue ( dChildren[iChild] );
} else
m_dSpawned.RemoveValue ( pChild );
// this should be a leaf node
SafeDelete ( pNode->m_dChildren[i] );
pNode->m_dChildren.RemoveFast ( i );
} else
{
DeleteNodesWOFields ( pNode->m_dChildren[i] );
i++;
}
}
}
void XQParseHelper_c::FixupDestForms ()
{
if ( !m_dMultiforms.GetLength() )
return;
CSphVector<XQNode_t *> dForms;
ARRAY_FOREACH ( iNode, m_dMultiforms )
{
const MultiformNode_t & tDesc = m_dMultiforms[iNode];
XQNode_t * pMultiParent = tDesc.m_pNode;
assert ( pMultiParent->m_dWords.GetLength()==1 && pMultiParent->m_dChildren.GetLength()==0 );
XQKeyword_t tKeyword;
Swap ( pMultiParent->m_dWords[0], tKeyword );
pMultiParent->m_dWords.Reset();
// FIXME: what about wildcards?
bool bExact = ( tKeyword.m_sWord.Length()>1 && tKeyword.m_sWord.cstr()[0]=='=' );
bool bFieldStart = tKeyword.m_bFieldStart;
bool bFieldEnd = tKeyword.m_bFieldEnd;
tKeyword.m_bFieldStart = false;
tKeyword.m_bFieldEnd = false;
XQNode_t * pMultiHead = new XQNode_t ( pMultiParent->m_dSpec );
pMultiHead->m_dWords.Add ( tKeyword );
m_dSpawned.Add ( pMultiHead );
dForms.Add ( pMultiHead );
for ( int iForm=0; iForm<tDesc.m_iDestCount; iForm++ )
{
tKeyword.m_iAtomPos++;
tKeyword.m_sWord = m_dDestForms [ tDesc.m_iDestStart + iForm ];
// propagate exact word flag to all destination forms
if ( bExact )
tKeyword.m_sWord.SetSprintf ( "=%s", tKeyword.m_sWord.cstr() );
XQNode_t * pMulti = new XQNode_t ( pMultiParent->m_dSpec );
pMulti->m_dWords.Add ( tKeyword );
m_dSpawned.Add ( pMulti );
dForms.Add ( pMulti );
}
// fix up field start/end modifiers
dForms[0]->m_dWords[0].m_bFieldStart = bFieldStart;
dForms.Last()->m_dWords[0].m_bFieldEnd = bFieldEnd;
pMultiParent->SetOp ( SPH_QUERY_AND, dForms );
dForms.Resize ( 0 );
}
}
void XQParseHelper_c::SetZone ( const StrVec_t & dZones )
{
assert ( m_pParsed );
m_pParsed->m_dZones = dZones;
}
const StrVec_t & XQParseHelper_c::GetZone() const
{
assert ( m_pParsed );
return m_pParsed->m_dZones;
}
static void TransformMorphOnlyFields ( XQNode_t * pNode, const CSphBitvec & tMorphDisabledFields )
{
if ( !pNode )
return;
ARRAY_FOREACH ( i, pNode->m_dChildren )
TransformMorphOnlyFields ( pNode->m_dChildren[i], tMorphDisabledFields );
if ( pNode->m_dSpec.IsEmpty () || pNode->m_dWords.IsEmpty () )
return;
const XQLimitSpec_t & tSpec = pNode->m_dSpec;
if ( tSpec.m_bFieldSpec && !tSpec.m_dFieldMask.TestAll ( true ) )
{
int iField=tMorphDisabledFields.Scan ( 0 );
while ( iField<tMorphDisabledFields.GetSize() )
{
if ( pNode->m_dSpec.m_dFieldMask.Test ( iField ) )
{
pNode->m_dWords.for_each ( [] ( XQKeyword_t & tKw )
{
if ( !tKw.m_sWord.IsEmpty() && !tKw.m_sWord.Begins( "=" ) && !tKw.m_sWord.Begins("*") && !tKw.m_sWord.Ends("*") )
tKw.m_sWord.SetSprintf ( "=%s", tKw.m_sWord.cstr() );
});
}
if ( ( iField+1 )<tMorphDisabledFields.GetSize() )
iField=tMorphDisabledFields.Scan ( iField+1 );
else
break;
}
}
}
static void FixupMorphOnlyFields ( XQNode_t * pNode, const CSphBitvec * pMorphFields )
{
if ( !pNode || !pMorphFields || pMorphFields->IsEmpty() )
return;
// invert the mask so that set bits mark the fields listed in morphology_skip_fields; that lets us walk them with CSphBitvec::Scan()
CSphBitvec tMorphDisabledFields ( *pMorphFields );
tMorphDisabledFields.Negate();
TransformMorphOnlyFields ( pNode, tMorphDisabledFields );
}
//////////////////////////////////////////////////////////////////////////
class XQParser_t : public XQParseHelper_c
{
friend void yyerror ( XQParser_t * pParser, const char * sMessage );
friend int yyparse (XQParser_t * pParser);
public:
XQParser_t();
~XQParser_t() override;
public:
bool Parse ( XQQuery_t & tQuery, const char * sQuery, const CSphQuery * pQuery, const TokenizerRefPtr_c & pTokenizer, const CSphSchema * pSchema, const DictRefPtr_c & pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields );
int ParseZone ( const char * pZone );
bool IsSpecial ( char c );
bool GetNumber ( const char * p, const char * sRestart );
int GetToken ( YYSTYPE * lvalp );
void HandleModifiers ( XQKeyword_t & tKeyword );
void AddQuery ( XQNode_t * pNode );
XQNode_t * AddKeyword ( const char * sKeyword, int iSkippedPosBeforeToken=0 );
XQNode_t * AddKeyword ( XQNode_t * pLeft, XQNode_t * pRight );
XQNode_t * AddOp ( XQOperator_e eOp, XQNode_t * pLeft, XQNode_t * pRight, int iOpArg=0 );
void SetPhrase ( XQNode_t * pNode, bool bSetExact );
void PhraseShiftQpos ( XQNode_t * pNode );
virtual void Cleanup () override;
inline void SetFieldSpec ( const FieldMask_t& uMask, int iMaxPos )
{
FixRefSpec();
m_dStateSpec.Last()->SetFieldSpec ( uMask, iMaxPos );
}
inline void SetZoneVec ( int iZoneVec, bool bZoneSpan = false )
{
FixRefSpec();
m_dStateSpec.Last()->SetZoneSpec ( m_dZoneVecs[iZoneVec], bZoneSpan );
}
inline void FixRefSpec ()
{
bool bRef = ( m_dStateSpec.GetLength()>1 && ( m_dStateSpec[m_dStateSpec.GetLength()-1]==m_dStateSpec[m_dStateSpec.GetLength()-2] ) );
if ( !bRef )
return;
XQLimitSpec_t * pSpec = m_dStateSpec.Pop();
m_dSpecPool.Add ( new XQLimitSpec_t ( *pSpec ) );
m_dStateSpec.Add ( m_dSpecPool.Last() );
}
public:
const CSphVector<int> & GetZoneVec ( int iZoneVec ) const
{
return m_dZoneVecs[iZoneVec];
}
public:
BYTE * m_sQuery = nullptr;
int m_iQueryLen = 0;
const char * m_pErrorAt = nullptr;
XQNode_t * m_pRoot = nullptr;
int m_iPendingNulls = 0;
int m_iPendingType = 0;
YYSTYPE m_tPendingToken;
bool m_bWasKeyword = false;
bool m_bEmpty = false;
bool m_bQuoted = false;
int m_iOvershortStep = 0;
int m_iQuorumQuote = -1;
int m_iQuorumFSlash = -1;
bool m_bCheckNumber = false;
StrVec_t m_dIntTokens;
CSphVector < CSphVector<int> > m_dZoneVecs;
CSphVector<XQLimitSpec_t *> m_dStateSpec;
CSphVector<XQLimitSpec_t *> m_dSpecPool;
CSphVector<int> m_dPhraseStar;
protected:
bool HandleFieldBlockStart ( const char * & pPtr ) override;
private:
XQNode_t * ParseRegex ( const char * pStart );
};
//////////////////////////////////////////////////////////////////////////
static int yylex ( YYSTYPE * lvalp, XQParser_t * pParser )
{
return pParser->GetToken ( lvalp );
}
void yyerror ( XQParser_t * pParser, const char * sMessage )
{
if ( pParser->m_pParsed->m_sParseError.IsEmpty() )
pParser->m_pParsed->m_sParseError.SetSprintf ( "P08: %s near '%s'", sMessage, pParser->m_pErrorAt );
}
#include "bissphinxquery.c"
//////////////////////////////////////////////////////////////////////////
void XQLimitSpec_t::SetFieldSpec ( const FieldMask_t& uMask, int iMaxPos )
{
m_bFieldSpec = true;
m_dFieldMask = uMask;
m_iFieldMaxPos = iMaxPos;
}
/// ctor
XQNode_t::XQNode_t ( const XQLimitSpec_t & dSpec )
: m_dSpec ( dSpec )
{
#ifdef XQ_DUMP_NODE_ADDR
printf ( "node new 0x%08x\n", this );
#endif
}
/// dtor
XQNode_t::~XQNode_t ()
{
#ifdef XQ_DUMP_NODE_ADDR
printf ( "node deleted %d 0x%08x\n", m_eOp, this );
#endif
ARRAY_FOREACH ( i, m_dChildren )
SafeDelete ( m_dChildren[i] );
}
void XQNode_t::SetFieldSpec ( const FieldMask_t& uMask, int iMaxPos )
{
// set it, if we do not yet have one
if ( !m_dSpec.m_bFieldSpec )
m_dSpec.SetFieldSpec ( uMask, iMaxPos );
// some of the children might not yet have a spec, even if the node itself has
// eg. 'hello @title world' (whole node has '@title' spec but 'hello' node does not have any!)
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i]->SetFieldSpec ( uMask, iMaxPos );
}
void XQLimitSpec_t::SetZoneSpec ( const CSphVector<int> & dZones, bool bZoneSpan )
{
m_dZones = dZones;
m_bZoneSpan = bZoneSpan;
}
void XQNode_t::SetZoneSpec ( const CSphVector<int> & dZones, bool bZoneSpan )
{
// set it, if we do not yet have one
if ( !m_dSpec.m_dZones.GetLength() )
m_dSpec.SetZoneSpec ( dZones, bZoneSpan );
// some of the children might not yet have a spec, even if the node itself has
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i]->SetZoneSpec ( dZones, bZoneSpan );
}
void XQNode_t::CopySpecs ( const XQNode_t * pSpecs )
{
if ( !pSpecs )
return;
if ( !m_dSpec.m_bFieldSpec )
m_dSpec.SetFieldSpec ( pSpecs->m_dSpec.m_dFieldMask, pSpecs->m_dSpec.m_iFieldMaxPos );
if ( !m_dSpec.m_dZones.GetLength() )
m_dSpec.SetZoneSpec ( pSpecs->m_dSpec.m_dZones, pSpecs->m_dSpec.m_bZoneSpan );
}
void XQNode_t::ClearFieldMask ()
{
m_dSpec.m_dFieldMask.SetAll();
ARRAY_FOREACH ( i, m_dChildren )
m_dChildren[i]->ClearFieldMask();
}
uint64_t XQNode_t::GetHash () const
{
if ( m_iMagicHash )
return m_iMagicHash;
XQOperator_e dZeroOp[2];
dZeroOp[0] = m_eOp;
dZeroOp[1] = (XQOperator_e) 0;
ARRAY_FOREACH ( i, m_dWords )
m_iMagicHash = 100 + ( m_iMagicHash ^ sphFNV64 ( m_dWords[i].m_sWord.cstr() ) ); // +100 to make it non-transitive
ARRAY_FOREACH ( j, m_dChildren )
m_iMagicHash = 100 + ( m_iMagicHash ^ m_dChildren[j]->GetHash() ); // +100 to make it non-transitive
m_iMagicHash += 1000000; // to immerse difference between parents and children
m_iMagicHash ^= sphFNV64 ( dZeroOp );
return m_iMagicHash;
}
uint64_t XQNode_t::GetFuzzyHash () const
{
if ( m_iFuzzyHash )
return m_iFuzzyHash;
XQOperator_e dZeroOp[2];
dZeroOp[0] = ( m_eOp==SPH_QUERY_PHRASE ? SPH_QUERY_PROXIMITY : m_eOp );
dZeroOp[1] = (XQOperator_e) 0;
ARRAY_FOREACH ( i, m_dWords )
m_iFuzzyHash = 100 + ( m_iFuzzyHash ^ sphFNV64 ( m_dWords[i].m_sWord.cstr() ) ); // +100 to make it non-transitive
ARRAY_FOREACH ( j, m_dChildren )
m_iFuzzyHash = 100 + ( m_iFuzzyHash ^ m_dChildren[j]->GetFuzzyHash () ); // +100 to make it non-transitive
m_iFuzzyHash += 1000000; // to immerse difference between parents and children
m_iFuzzyHash ^= sphFNV64 ( dZeroOp );
return m_iFuzzyHash;
}
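// Why the '+100' above keeps the hash non-transitive: a plain XOR fold would yield
// h(a)^h(b)^h(c) for both 'a (b c)' and '(a b) c'; re-basing the accumulator after
// every mix makes the result depend on association order, so structurally different
// trees hash differently even over the same set of words.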
void XQNode_t::SetOp ( XQOperator_e eOp, XQNode_t * pArg1, XQNode_t * pArg2 )
{
m_eOp = eOp;
m_dChildren.Reset();
if ( pArg1 )
{
m_dChildren.Add ( pArg1 );
pArg1->m_pParent = this;
}
if ( pArg2 )
{
m_dChildren.Add ( pArg2 );
pArg2->m_pParent = this;
}
}
int XQNode_t::FixupAtomPos()
{
assert ( m_eOp==SPH_QUERY_PROXIMITY && m_dWords.GetLength()>0 );
ARRAY_FOREACH ( i, m_dWords )
{
int iSub = m_dWords[i].m_iSkippedBefore-1;
if ( iSub>0 )
{
for ( int j = i; j < m_dWords.GetLength(); j++ )
m_dWords[j].m_iAtomPos -= iSub;
}
}
return m_dWords.Last().m_iAtomPos+1;
}
XQNode_t * XQNode_t::Clone ()
{
XQNode_t * pRet = new XQNode_t ( m_dSpec );
pRet->SetOp ( m_eOp );
pRet->m_dWords = m_dWords;
pRet->m_iOpArg = m_iOpArg;
pRet->m_iAtomPos = m_iAtomPos;
pRet->m_iUser = m_iUser;
pRet->m_bVirtuallyPlain = m_bVirtuallyPlain;
pRet->m_bNotWeighted = m_bNotWeighted;
pRet->m_bPercentOp = m_bPercentOp;
if ( m_dChildren.GetLength()==0 )
return pRet;
pRet->m_dChildren.Reserve ( m_dChildren.GetLength() );
for ( int i = 0; i < m_dChildren.GetLength(); ++i )
{
pRet->m_dChildren.Add ( m_dChildren[i]->Clone() );
pRet->m_dChildren.Last()->m_pParent = pRet;
}
return pRet;
}
bool XQNode_t::ResetHash ()
{
bool bAlreadyReset = ( m_iMagicHash==0 && m_iFuzzyHash==0 );
m_iMagicHash = 0;
m_iFuzzyHash = 0;
return bAlreadyReset;
}
/// return either index of pNode among Parent.m_dChildren, or -1
static int GetNodeChildIndex ( const XQNode_t * pParent, const XQNode_t * pNode )
{
assert ( pParent && pNode );
ARRAY_FOREACH ( i, pParent->m_dChildren )
if ( pParent->m_dChildren[i]==pNode )
return i;
return -1;
}
//////////////////////////////////////////////////////////////////////////
XQParser_t::XQParser_t ()
{
m_dSpecPool.Add ( new XQLimitSpec_t() );
m_dStateSpec.Add ( m_dSpecPool.Last() );
}
XQParser_t::~XQParser_t ()
{
ARRAY_FOREACH ( i, m_dSpecPool )
SafeDelete ( m_dSpecPool[i] );
}
/// cleanup spawned nodes (for bailing out on errors)
void XQParser_t::Cleanup ()
{
XQParseHelper_c::Cleanup();
m_dStateSpec.Reset();
}
/// my special chars
bool XQParser_t::IsSpecial ( char c )
{
return c=='(' || c==')' || c=='|' || c=='-' || c=='!' || c=='@' || c=='~' || c=='"' || c=='/';
}
/// helper find-or-add (make it generic and move to sphinxstd?)
static int GetZoneIndex ( XQQuery_t * pQuery, const CSphString & sZone )
{
ARRAY_FOREACH ( i, pQuery->m_dZones )
if ( pQuery->m_dZones[i]==sZone )
return i;
pQuery->m_dZones.Add ( sZone );
return pQuery->m_dZones.GetLength()-1;
}
/// parse zone
int XQParser_t::ParseZone ( const char * pZone )
{
const char * p = pZone;
// case one, just a single zone name
if ( sphIsAlpha ( *pZone ) )
{
// find zone name
while ( sphIsAlpha(*p) )
p++;
m_pTokenizer->SetBufferPtr ( p );
// extract and lowercase it
CSphString sZone;
sZone.SetBinary ( pZone, int(p-pZone) );
sZone.ToLower();
// register it in zones list
int iZone = GetZoneIndex ( m_pParsed, sZone );
// create new 1-zone vector
m_dZoneVecs.Add().Add ( iZone );
return m_dZoneVecs.GetLength()-1;
}
// case two, zone block
// it must follow strict (name1,name2,...) syntax
if ( *pZone=='(' )
{
// create new zone vector
CSphVector<int> & dZones = m_dZoneVecs.Add();
p = ++pZone;
// scan names
while (true)
{
// syntax error, name expected!
if ( !sphIsAlpha(*p) )
{
Error ( "unexpected character '%c' in zone block operator", *p );
return -1;
}
// scan next name
while ( sphIsAlpha(*p) )
p++;
// extract and lowercase it
CSphString sZone;
sZone.SetBinary ( pZone, int(p-pZone) );
sZone.ToLower();
// register it in zones list
dZones.Add ( GetZoneIndex ( m_pParsed, sZone ) );
// must be either followed by comma, or closing paren
// everything else will cause syntax error
if ( *p==')' )
{
m_pTokenizer->SetBufferPtr ( p+1 );
break;
}
if ( *p==',' )
pZone = ++p;
}
return m_dZoneVecs.GetLength()-1;
}
// unhandled case
Error ( "internal error, unhandled case in ParseZone()" );
return -1;
}
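// Zone operator forms parsed above (examples):
// ZONE:h1 term -- match 'term' inside any 'h1' zone
// ZONE:(h1,h2) term -- match inside either an 'h1' or an 'h2' zone
// ZONESPAN:(h1,h2) term -- additionally requires the match to stay within a single contiguous span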
XQNode_t * XQParser_t::ParseRegex ( const char * sStart )
{
assert ( sStart );
int iDel = *sStart++;
const char * sEnd = m_pTokenizer->GetBufferEnd ();
const char * sToken = sStart;
while ( sStart<sEnd )
{
const char * sNextDel = (const char *)memchr ( sStart, iDel, sEnd-sStart );
if ( !sNextDel )
break;
if ( sNextDel+1<sEnd && sNextDel[1]==')' )
{
// spawn token node
XQNode_t * pNode = AddKeyword ( nullptr, 0 );
pNode->m_dWords[0].m_sWord.SetBinary ( sToken, sNextDel-sToken );
pNode->m_dWords[0].m_bRegex = true;
// skip the whole expression
m_pTokenizer->SetBufferPtr ( sNextDel+2 );
return pNode;
}
sStart = sNextDel+1;
}
// not a complete REGEX(/term/)
return nullptr;
}
bool XQParser_t::GetNumber ( const char * p, const char * sRestart )
{
int iDots = 0;
const char * sToken = p;
const char * sEnd = m_pTokenizer->GetBufferEnd ();
while ( p<sEnd && ( isdigit ( *(const BYTE*)p ) || *p=='.' ) )
{
iDots += ( *p=='.' );
p++;
}
// looks like a float but has too many dots, or a dot with no digits; treat as a plain token
if ( iDots && ( iDots>1 || p-sToken==iDots ) )
p = sToken;
// a float is allowed only as a quorum argument; otherwise it goes through the regular keyword stream
if ( iDots==1 && ( m_iQuorumQuote!=m_iQuorumFSlash || m_iQuorumQuote!=m_iAtomPos ) )
p = sToken;
static const int NUMBER_BUF_LEN = 10; // max strlen of int32
if ( p>sToken && p-sToken<NUMBER_BUF_LEN
&& !( *p=='-' && !( p-sToken==1 && sphIsModifier ( p[-1] ) ) ) // !bDashInside copied over from arbitration
&& ( *p=='\0' || sphIsSpace(*p) || IsSpecial(*p) ) )
{
// float as quorum argument has higher precedence than blended
bool bQuorum = ( m_iQuorumQuote==m_iQuorumFSlash && m_iQuorumFSlash==m_iAtomPos );
bool bQuorumPercent = ( bQuorum && iDots==1 );
bool bTok = ( m_pTokenizer->GetToken()!=NULL );
if ( bTok && m_pTokenizer->TokenIsBlended() && !( bQuorum || bQuorumPercent ) ) // number with blended should be tokenized as usual
{
m_pTokenizer->SkipBlended();
m_pTokenizer->SetBufferPtr ( sRestart );
} else if ( bTok && m_pTokenizer->WasTokenSynonym() && !( bQuorum || bQuorumPercent ) )
{
m_pTokenizer->SetBufferPtr ( sRestart );
} else
{
// got a reasonably short number followed by whitespace or a special char; handle it
char sNumberBuf[NUMBER_BUF_LEN];
int iNumberLen = Min ( (int)sizeof(sNumberBuf)-1, int(p-sToken) );
memcpy ( sNumberBuf, sToken, iNumberLen );
sNumberBuf[iNumberLen] = '\0';
if ( iDots )
m_tPendingToken.tInt.fValue = (float)strtod ( sNumberBuf, NULL );
else
m_tPendingToken.tInt.iValue = atoi ( sNumberBuf );
// check if it can be used as a keyword too
m_pTokenizer->SetBuffer ( (BYTE*)sNumberBuf, iNumberLen );
sToken = (const char*) m_pTokenizer->GetToken();
m_pTokenizer->SetBuffer ( m_sQuery, m_iQueryLen );
m_pTokenizer->SetBufferPtr ( p );
m_tPendingToken.tInt.iStrIndex = -1;
if ( sToken )
{
m_dIntTokens.Add ( sToken );
if ( m_pDict->GetWordID ( (BYTE*)const_cast<char*>(sToken) ) )
m_tPendingToken.tInt.iStrIndex = m_dIntTokens.GetLength()-1;
else
m_dIntTokens.Pop();
m_iAtomPos++;
}
m_iPendingNulls = 0;
m_iPendingType = iDots ? TOK_FLOAT : TOK_INT;
return true;
}
}
return false;
}
static bool GetNearToken ( const char * sTok, int iTokLen, int iTokType, const char * sBuf, const TokenizerRefPtr_c& pTokenizer, int & iPendingType, YYSTYPE & tPendingToken )
{
if ( strncmp ( sBuf, sTok, iTokLen )==0 && isdigit(sBuf[iTokLen]) )
{
// extract that int
int iVal = 0;
for ( sBuf=sBuf+iTokLen; isdigit ( *sBuf ); sBuf++ )
iVal = iVal*10 + ( *sBuf ) - '0'; // FIXME! check for overflow?
pTokenizer->SetBufferPtr ( sBuf );
// we just lexed our next token
iPendingType = iTokType;
tPendingToken.tInt.iValue = iVal;
tPendingToken.tInt.iStrIndex = -1;
return true;
}
return false;
}
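// GetNearToken() recognizes case-sensitive operators glued to an integer, e.g.:
// one NEAR/3 two -- 'one' within 3 positions of 'two'
// one NOTNEAR/5 two -- 'one' only when 'two' is not within 5 positions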
/// a lexer of my own
int XQParser_t::GetToken ( YYSTYPE * lvalp )
{
bool bWasFrontModifier = false; // used to print warning
// what, no one's pending for a bending?!
if ( !m_iPendingType )
while (true)
{
assert ( m_iPendingNulls==0 );
bool bWasKeyword = m_bWasKeyword;
m_bWasKeyword = false;
int iSkippedPosBeforeToken = 0;
if ( m_bWasBlended )
{
iSkippedPosBeforeToken = m_pTokenizer->SkipBlended();
// just add all skipped blended parts except blended head (already added to atomPos)
if ( iSkippedPosBeforeToken>1 )
m_iAtomPos += iSkippedPosBeforeToken - 1;
}
// tricky stuff
// we need to manually check for numbers in certain states (currently, just after proximity or quorum operator)
// required because if 0-9 are not in charset_table, or min_word_len is too high,
// the tokenizer will *not* return the number as a token!
const char * pTokenStart = m_pTokenizer->GetBufferPtr();
const char * pLastTokenEnd = m_pTokenizer->GetTokenEnd();
const char * sBufferEnd = m_pTokenizer->GetBufferEnd();
m_pErrorAt = pTokenStart;
const char * p = pTokenStart;
while ( p<sBufferEnd && isspace ( *(const BYTE*)p ) ) p++; // to avoid CRT assertions on Windows
if ( m_bCheckNumber )
{
m_bCheckNumber = false;
if ( GetNumber ( p, pTokenStart ) )
break;
}
// not a number, too long a number, or a number not followed by whitespace; fall back to regular tokenizing
const char * sToken = (const char *) m_pTokenizer->GetToken ();
if ( !sToken )
{
m_iPendingNulls = m_pTokenizer->GetOvershortCount() * m_iOvershortStep;
if ( !( m_iPendingNulls || m_pTokenizer->GetBufferPtr()-p>0 ) )
return 0;
m_iPendingNulls = 0;
lvalp->pNode = AddKeyword ( nullptr, iSkippedPosBeforeToken );
m_bWasKeyword = true;
return TOK_KEYWORD;
}
// now let's do some token post-processing
m_bWasBlended = m_pTokenizer->TokenIsBlended();
m_bEmpty = false;
int iPrevDeltaPos = 0;
if ( m_pPlugin && m_pPlugin->m_fnPushToken )
sToken = m_pPlugin->m_fnPushToken ( m_pPluginData, const_cast<char*>(sToken), &iPrevDeltaPos, m_pTokenizer->GetTokenStart(), int ( m_pTokenizer->GetTokenEnd() - m_pTokenizer->GetTokenStart() ) );
if ( !sToken )
return 0;
m_iPendingNulls = m_pTokenizer->GetOvershortCount() * m_iOvershortStep;
m_iAtomPos += 1 + m_iPendingNulls;
if ( iPrevDeltaPos>1 ) // to match the m_bWasBlended condition above
m_iAtomPos += ( iPrevDeltaPos - 1);
bool bMultiDestHead = false;
bool bMultiDest = false;
int iDestCount = 0;
// do nothing inside phrase
if ( !m_pTokenizer->IsPhraseMode() )
bMultiDest = m_pTokenizer->WasTokenMultiformDestination ( bMultiDestHead, iDestCount );
// handle NEAR (must be case-sensitive, and immediately followed by slash and int)
if ( !bMultiDest && p && !m_pTokenizer->IsPhraseMode() &&
( GetNearToken ( "NEAR/", 5, TOK_NEAR, p, m_pTokenizer, m_iPendingType, m_tPendingToken )
|| GetNearToken ( "NOTNEAR/", 8, TOK_NOTNEAR, p, m_pTokenizer, m_iPendingType, m_tPendingToken ) ) )
{
m_iAtomPos -= 1; // skip token
break;
}
// handle SENTENCE
if ( !bMultiDest && p && !m_pTokenizer->IsPhraseMode() && !strcasecmp ( sToken, "sentence" ) && !strncmp ( p, "SENTENCE", 8 ) )
{
// we just lexed our next token
m_iPendingType = TOK_SENTENCE;
m_iAtomPos -= 1;
break;
}
// handle PARAGRAPH
if ( !bMultiDest && p && !m_pTokenizer->IsPhraseMode() && !strcasecmp ( sToken, "paragraph" ) && !strncmp ( p, "PARAGRAPH", 9 ) )
{
// we just lexed our next token
m_iPendingType = TOK_PARAGRAPH;
m_iAtomPos -= 1;
break;
}
// handle MAYBE
if ( !bMultiDest && p && !m_pTokenizer->IsPhraseMode() && !strcasecmp ( sToken, "maybe" ) && !strncmp ( p, "MAYBE", 5 ) )
{
// we just lexed our next token
m_iPendingType = TOK_MAYBE;
m_iAtomPos -= 1;
break;
}
// handle ZONE
if ( !bMultiDest && p && !m_pTokenizer->IsPhraseMode() && !strncmp ( p, "ZONE:", 5 )
&& ( sphIsAlpha(p[5]) || p[5]=='(' ) )
{
// ParseZone() will update tokenizer buffer ptr as needed
int iVec = ParseZone ( p+5 );
if ( iVec<0 )
return -1;
// we just lexed our next token
m_iPendingType = TOK_ZONE;
m_tPendingToken.iZoneVec = iVec;
m_iAtomPos -= 1;
break;
}
// handle ZONESPAN
if ( !bMultiDest && p && !m_pTokenizer->IsPhraseMode() && !strncmp ( p, "ZONESPAN:", 9 )
&& ( sphIsAlpha(p[9]) || p[9]=='(' ) )
{
// ParseZone() will update tokenizer buffer ptr as needed
int iVec = ParseZone ( p+9 );
if ( iVec<0 )
return -1;
// we just lexed our next token
m_iPendingType = TOK_ZONESPAN;
m_tPendingToken.iZoneVec = iVec;
m_iAtomPos -= 1;
break;
}
// handle REGEX
if ( !bMultiDest && p && !strncmp ( p, "REGEX(", 6 ) )
{
// we just lexed our REGEX token
XQNode_t * pRegex = ParseRegex ( p+6 );
if ( pRegex )
{
m_tPendingToken.pNode = pRegex;
m_iPendingType = TOK_REGEX;
break;
}
}
// count [ * ] at phrase node for qpos shift
if ( m_pTokenizer->IsPhraseMode() && pLastTokenEnd )
{
if ( sToken[0]=='*' && sToken[1]=='\0' ) // phrase star should be separate token
{
m_dPhraseStar.Add ( m_iAtomPos );
} else
{
int iSpace = 0;
int iStar = 0;
const char * sCur = pLastTokenEnd;
const char * sEnd = m_pTokenizer->GetTokenStart();
for ( ; sCur<sEnd; sCur++ )
{
int iCur = int ( sCur - pLastTokenEnd );
switch ( *sCur )
{
case '*':
iStar = int ( sCur - pLastTokenEnd );
break;
case ' ':
if ( iSpace+2==iCur && iStar+1==iCur ) // match only [ * ] (separate single star) as valid shift operator
m_dPhraseStar.Add ( m_iAtomPos );
iSpace = iCur;
break;
}
}
}
}
// handle specials
if ( m_pTokenizer->WasTokenSpecial() )
{
// specials must not affect pos
m_iAtomPos--;
// some specials are especially special
if ( sToken[0]=='@' )
{
bool bIgnore;
// parse fields operator
if ( !ParseFields ( m_tPendingToken.tFieldLimit.dMask, m_tPendingToken.tFieldLimit.iMaxPos, bIgnore ) )
return -1;
if ( bIgnore )
continue;
m_iPendingType = TOK_FIELDLIMIT;
break;
} else if ( sToken[0]=='<' )
{
if ( *m_pTokenizer->GetBufferPtr()=='<' )
{
// got "<<", aka operator BEFORE
m_iPendingType = TOK_BEFORE;
break;
} else
{
// got stray '<', ignore
if ( m_iPendingNulls>0 )
{
m_iPendingNulls = 0;
lvalp->pNode = AddKeyword ( nullptr, iSkippedPosBeforeToken );
m_bWasKeyword = true;
return TOK_KEYWORD;
}
continue;
}
} else if ( sToken[0]=='^' )
{
const char * pTokEnd = m_pTokenizer->GetTokenEnd();
if ( pTokEnd<m_pTokenizer->GetBufferEnd() && !sphIsSpace ( pTokEnd[0] ) )
bWasFrontModifier = true;
// this special is handled in HandleModifiers()
continue;
} else if ( sToken[0]=='$' )
{
if ( bWasKeyword )
continue;
if ( sphIsSpace ( m_pTokenizer->GetTokenStart() [ -1 ] ) )
continue;
// right after overshort
if ( m_pTokenizer->GetOvershortCount()==1 )
{
m_iPendingNulls = 0;
lvalp->pNode = AddKeyword ( nullptr, iSkippedPosBeforeToken );
return TOK_KEYWORD;
}
Warning ( "modifiers must be applied to keywords, not operators" );
// this special is handled in HandleModifiers()
continue;
} else
{
bool bWasQuoted = m_bQuoted;
// all the other specials are passed to parser verbatim
if ( sToken[0]=='"' )
{
m_bQuoted = !m_bQuoted;
if ( m_bQuoted )
m_dPhraseStar.Resize ( 0 );
}
m_iPendingType = sToken[0];
m_pTokenizer->SetPhraseMode ( m_bQuoted );
if ( sToken[0]=='(' )
{
XQLimitSpec_t * pLastField = m_dStateSpec.Last();
m_dStateSpec.Add ( pLastField );
} else if ( sToken[0]==')' && m_dStateSpec.GetLength()>1 )
{
m_dStateSpec.Pop();
}
if ( bWasQuoted && !m_bQuoted )
m_iQuorumQuote = m_iAtomPos;
else if ( sToken[0]=='/' )
m_iQuorumFSlash = m_iAtomPos;
if ( sToken[0]=='~' ||sToken[0]=='/' )
m_bCheckNumber = true;
break;
}
}
// check for stopword, and create that node
// temp buffer is required, because GetWordID() might expand (!) the keyword in-place
BYTE sTmp [ MAX_TOKEN_BYTES ];
strncpy ( (char*)sTmp, sToken, MAX_TOKEN_BYTES );
sTmp[MAX_TOKEN_BYTES-1] = '\0';
int iStopWord = 0;
if ( m_pPlugin && m_pPlugin->m_fnPreMorph )
m_pPlugin->m_fnPreMorph ( m_pPluginData, (char*)sTmp, &iStopWord );
SphWordID_t uWordId = iStopWord ? 0 : m_pDict->GetWordID ( sTmp );
if ( uWordId && m_pPlugin && m_pPlugin->m_fnPostMorph )
{
int iRes = m_pPlugin->m_fnPostMorph ( m_pPluginData, (char*)sTmp, &iStopWord );
if ( iStopWord )
uWordId = 0;
else if ( iRes )
uWordId = m_pDict->GetWordIDNonStemmed ( sTmp );
}
if ( !uWordId )
{
sToken = nullptr;
// stopwords with step=0 must not affect pos
if ( m_bEmptyStopword )
m_iAtomPos--;
}
if ( bMultiDest && !bMultiDestHead )
{
assert ( m_dMultiforms.GetLength() );
m_dMultiforms.Last().m_iDestCount++;
m_dDestForms.Add ( sToken );
m_bWasKeyword = true;
} else
{
m_tPendingToken.pNode = AddKeyword ( sToken, iSkippedPosBeforeToken );
m_iPendingType = TOK_KEYWORD;
}
if ( bMultiDestHead )
{
MultiformNode_t & tMulti = m_dMultiforms.Add();
tMulti.m_pNode = m_tPendingToken.pNode;
tMulti.m_iDestStart = m_dDestForms.GetLength();
tMulti.m_iDestCount = 0;
}
if ( !bMultiDest || bMultiDestHead )
break;
}
if ( bWasFrontModifier && m_iPendingType!=TOK_KEYWORD )
Warning ( "modifiers must be applied to keywords, not operators" );
// someone must be pending now!
assert ( m_iPendingType );
m_bEmpty = false;
// ladies first, though
if ( m_iPendingNulls>0 )
{
m_iPendingNulls--;
lvalp->pNode = AddKeyword ( nullptr );
m_bWasKeyword = true;
return TOK_KEYWORD;
}
// pending the offending
int iRes = m_iPendingType;
m_iPendingType = 0;
if ( iRes==TOK_KEYWORD )
m_bWasKeyword = true;
*lvalp = m_tPendingToken;
return iRes;
}
void XQParser_t::AddQuery ( XQNode_t * pNode )
{
m_pRoot = pNode;
}
// Handle modifiers:
// 1) ^ - field start
// 2) $ - field end
// 3) ^1.234 - keyword boost
// keyword$^1.234 - both field end and boost apply
// keywords^1.234$ - only the boost applies here; this '$' is NOT a modifier
void XQParser_t::HandleModifiers ( XQKeyword_t & tKeyword )
{
const char * sTokStart = m_pTokenizer->GetTokenStart();
const char * sTokEnd = m_pTokenizer->GetTokenEnd();
if ( !sTokStart || !sTokEnd )
return;
const char * sQuery = reinterpret_cast<char *> ( m_sQuery );
tKeyword.m_bFieldStart = ( sTokStart-sQuery )>0 && sTokStart [ -1 ]=='^' &&
!( ( sTokStart-sQuery )>1 && sTokStart [ -2 ]=='\\' );
if ( sTokEnd[0]=='$' )
{
tKeyword.m_bFieldEnd = true;
++sTokEnd; // Skipping.
}
if ( sTokEnd[0]=='^' && ( sTokEnd[1]=='.' || sphIsDigital ( sTokEnd[1] ) ) )
{
// Probably we have a boost, let's check.
char * pEnd;
float fBoost = (float)strtod ( sTokEnd+1, &pEnd );
if ( ( sTokEnd+1 )!=pEnd )
{
// We do have a boost.
// FIXME Handle ERANGE errno here.
tKeyword.m_fBoost = fBoost;
m_pTokenizer->SetBufferPtr ( pEnd );
}
}
}
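// Illustration: for the raw query [ ^hello$^1.5 ], the token "hello" gets
// m_bFieldStart from the preceding '^' and m_bFieldEnd from the trailing '$',
// and the trailing "^1.5" is then parsed via strtod() into m_fBoost=1.5,
// with the buffer pointer moved past the boost so it is not lexed again.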
XQNode_t * XQParser_t::AddKeyword ( const char * sKeyword, int iSkippedPosBeforeToken )
{
XQKeyword_t tAW ( sKeyword, m_iAtomPos );
tAW.m_iSkippedBefore = iSkippedPosBeforeToken;
HandleModifiers ( tAW );
XQNode_t * pNode = new XQNode_t ( *m_dStateSpec.Last() );
pNode->m_dWords.Add ( tAW );
m_dSpawned.Add ( pNode );
return pNode;
}
XQNode_t * XQParser_t::AddKeyword ( XQNode_t * pLeft, XQNode_t * pRight )
{
if ( !pLeft || !pRight )
return pLeft ? pLeft : pRight;
assert ( pLeft->m_dWords.GetLength()>0 );
assert ( pRight->m_dWords.GetLength()==1 );
pLeft->m_dWords.Add ( pRight->m_dWords[0] );
m_dSpawned.RemoveValue ( pRight );
SafeDelete ( pRight );
return pLeft;
}
static bool HasMissedField ( const XQLimitSpec_t & tSpec )
{
return ( tSpec.m_dFieldMask.TestAll ( false ) && tSpec.m_iFieldMaxPos==0 && !tSpec.m_bZoneSpan && tSpec.m_dZones.GetLength()==0 );
}
XQNode_t * XQParser_t::AddOp ( XQOperator_e eOp, XQNode_t * pLeft, XQNode_t * pRight, int iOpArg )
{
/////////
// unary
/////////
if ( eOp==SPH_QUERY_NOT )
{
XQNode_t * pNode = new XQNode_t ( *m_dStateSpec.Last() );
pNode->SetOp ( SPH_QUERY_NOT, pLeft );
m_dSpawned.Add ( pNode );
return pNode;
}
//////////
// binary
//////////
if ( !pLeft || !pRight )
return pLeft ? pLeft : pRight;
// build a new node
XQNode_t * pResult = NULL;
if ( pLeft->m_dChildren.GetLength() && pLeft->GetOp()==eOp && pLeft->m_iOpArg==iOpArg )
{
pLeft->m_dChildren.Add ( pRight );
pRight->m_pParent = pLeft;
pResult = pLeft;
} else
{
// however-2 (besides the 'however' below): for [@@relaxed ((@title hello) | (@missed world)) @body other terms]
// we should use the valid (left) field mask for the complex (OR) node,
// as the right node in this case has m_bFieldSpec==true but m_dFieldMask==0
const XQLimitSpec_t & tSpec = HasMissedField ( pRight->m_dSpec ) ? pLeft->m_dSpec : pRight->m_dSpec;
// however, it's right (!) spec which is chosen for the resulting node,
// eg. '@title hello' + 'world @body program'
XQNode_t * pNode = new XQNode_t ( tSpec );
pNode->SetOp ( eOp, pLeft, pRight );
pNode->m_iOpArg = iOpArg;
m_dSpawned.Add ( pNode );
pResult = pNode;
}
return pResult;
}
void XQParser_t::SetPhrase ( XQNode_t * pNode, bool bSetExact )
{
if ( !pNode )
return;
assert ( pNode->m_dWords.GetLength() );
if ( bSetExact )
{
ARRAY_FOREACH ( iWord, pNode->m_dWords )
{
if ( !pNode->m_dWords[iWord].m_sWord.IsEmpty() )
pNode->m_dWords[iWord].m_sWord.SetSprintf ( "=%s", pNode->m_dWords[iWord].m_sWord.cstr() );
}
}
pNode->SetOp ( SPH_QUERY_PHRASE );
PhraseShiftQpos ( pNode );
}
void XQParser_t::PhraseShiftQpos ( XQNode_t * pNode )
{
if ( !m_dPhraseStar.GetLength() )
return;
const int * pLast = m_dPhraseStar.Begin();
const int * pEnd = m_dPhraseStar.Begin() + m_dPhraseStar.GetLength();
int iQposShiftStart = *pLast;
int iQposShift = 0;
int iLastStarPos = *pLast;
ARRAY_FOREACH ( iWord, pNode->m_dWords )
{
XQKeyword_t & tWord = pNode->m_dWords[iWord];
// fold stars in phrase till current term position
while ( pLast<pEnd && *(pLast)<=tWord.m_iAtomPos )
{
iLastStarPos = *pLast;
pLast++;
iQposShift++;
}
// a star dictionary passes the raw star through, while a regular dictionary suppresses it;
// the raw star might also be suppressed by the min_word_len option,
// so remove the qpos shift from a duplicated raw-star term,
// but not from stopwords, which are also terms with an empty word
if ( tWord.m_sWord=="*" || ( tWord.m_sWord.IsEmpty() && tWord.m_iAtomPos==iLastStarPos ) )
{
pNode->m_dWords.Remove ( iWord-- );
iQposShift--;
continue;
}
if ( iQposShiftStart<=tWord.m_iAtomPos )
tWord.m_iAtomPos += iQposShift;
}
}
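// Illustration (roughly): in the phrase query [ "red * apple" ] the lone star
// is recorded in m_dPhraseStar while being dropped as a keyword, and
// PhraseShiftQpos() then shifts the qpos of every keyword at or after the
// star position by one per star, so "apple" ends up an extra position away
// from "red" and the phrase only matches with a one-word gap between them.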
bool XQParser_t::Parse ( XQQuery_t & tParsed, const char * sQuery, const CSphQuery * pQuery, const TokenizerRefPtr_c& pTokenizer, const CSphSchema * pSchema, const DictRefPtr_c& pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields )
{
// FIXME? might wanna verify somehow that pTokenizer has all the specials etc from sphSetupQueryTokenizer
assert ( pTokenizer->IsQueryTok() );
// most outcomes are errors
SafeDelete ( tParsed.m_pRoot );
// check for relaxed syntax
const char * OPTION_RELAXED = "@@relaxed";
auto OPTION_RELAXED_LEN = (const int) strlen ( OPTION_RELAXED );
m_bStopOnInvalid = true;
if ( sQuery && strncmp ( sQuery, OPTION_RELAXED, OPTION_RELAXED_LEN )==0 && !sphIsAlpha ( sQuery[OPTION_RELAXED_LEN] ) )
{
sQuery += OPTION_RELAXED_LEN;
m_bStopOnInvalid = false;
}
m_pPlugin = nullptr;
m_pPluginData = nullptr;
if ( pQuery && !pQuery->m_sQueryTokenFilterName.IsEmpty() )
{
m_pPlugin = PluginAcquire<PluginQueryTokenFilter_c> ( pQuery->m_sQueryTokenFilterLib.cstr(), PLUGIN_QUERY_TOKEN_FILTER, pQuery->m_sQueryTokenFilterName.cstr(), tParsed.m_sParseError );
if ( !m_pPlugin )
return false;
char szError [ SPH_UDF_ERROR_LEN ];
if ( m_pPlugin->m_fnInit && m_pPlugin->m_fnInit ( &m_pPluginData, MAX_TOKEN_BYTES, pQuery->m_sQueryTokenFilterOpts.cstr(), szError )!=0 )
{
tParsed.m_sParseError = szError;
m_pPlugin = nullptr;
m_pPluginData = nullptr;
return false;
}
}
// setup parser
DictRefPtr_c pMyDict = GetStatelessDict ( pDict );
Setup ( pSchema, pTokenizer->Clone ( SPH_CLONE ), pMyDict, &tParsed, tSettings );
m_sQuery = (BYTE*)const_cast<char*>(sQuery);
m_iQueryLen = sQuery ? (int) strlen(sQuery) : 0;
m_iPendingNulls = 0;
m_iPendingType = 0;
m_pRoot = nullptr;
m_bEmpty = true;
m_iOvershortStep = tSettings.m_iOvershortStep;
m_pTokenizer->SetBuffer ( m_sQuery, m_iQueryLen );
int iRes = yyparse ( this );
if ( m_pPlugin )
{
if ( m_pPlugin->m_fnDeinit )
m_pPlugin->m_fnDeinit ( m_pPluginData );
m_pPlugin = nullptr;
m_pPluginData = nullptr;
}
if ( ( iRes || !m_pParsed->m_sParseError.IsEmpty() ) && !m_bEmpty )
{
Cleanup ();
return false;
}
bool bNotOnlyAllowed = g_bOnlyNotAllowed;
if ( pQuery )
bNotOnlyAllowed |= pQuery->m_bNotOnlyAllowed;
XQNode_t * pNewRoot = FixupTree ( m_pRoot, *m_dStateSpec.Last(), pMorphFields, bNotOnlyAllowed );
if ( !pNewRoot )
{
Cleanup();
return false;
}
tParsed.m_pRoot = pNewRoot;
return true;
}
bool XQParser_t::HandleFieldBlockStart ( const char * & pPtr )
{
if ( *pPtr=='(' )
{
// handle @(
pPtr++;
return true;
}
return false;
}
//////////////////////////////////////////////////////////////////////////
#ifdef XQDEBUG
static void xqIndent ( int iIndent )
{
iIndent *= 2;
while ( iIndent-- )
printf ( "|-" );
}
void xqDump ( const XQNode_t * pNode, int iIndent )
{
#ifdef XQ_DUMP_NODE_ADDR
printf ( "0x%08x ", pNode );
#endif
if ( pNode->m_dChildren.GetLength() )
{
xqIndent ( iIndent );
switch ( pNode->GetOp() )
{
case SPH_QUERY_AND: printf ( "AND:" ); break;
case SPH_QUERY_OR: printf ( "OR:" ); break;
case SPH_QUERY_MAYBE: printf ( "MAYBE:" ); break;
case SPH_QUERY_NOT: printf ( "NOT:" ); break;
case SPH_QUERY_ANDNOT: printf ( "ANDNOT:" ); break;
case SPH_QUERY_BEFORE: printf ( "BEFORE:" ); break;
case SPH_QUERY_PHRASE: printf ( "PHRASE:" ); break;
case SPH_QUERY_PROXIMITY: printf ( "PROXIMITY:" ); break;
case SPH_QUERY_QUORUM: printf ( "QUORUM:" ); break;
case SPH_QUERY_NEAR: printf ( "NEAR:" ); break;
case SPH_QUERY_SENTENCE: printf ( "SENTENCE:" ); break;
case SPH_QUERY_PARAGRAPH: printf ( "PARAGRAPH:" ); break;
default: printf ( "unknown-op-%d:", pNode->GetOp() ); break;
}
printf ( " (%d)\n", pNode->m_dChildren.GetLength() );
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
assert ( pNode->m_dChildren[i]->m_pParent==pNode );
xqDump ( pNode->m_dChildren[i], iIndent+1 );
}
} else
{
xqIndent ( iIndent );
if ( pNode->GetOp()==SPH_QUERY_SCAN )
printf ( "SCAN:" );
else
printf ( "MATCH(%d,%d):", pNode->m_dSpec.m_dFieldMask.GetMask32(), pNode->m_iOpArg );
ARRAY_FOREACH ( i, pNode->m_dWords )
{
const XQKeyword_t & tWord = pNode->m_dWords[i];
const char * sLocTag = "";
if ( tWord.m_bFieldStart ) sLocTag = ", start";
if ( tWord.m_bFieldEnd ) sLocTag = ", end";
printf ( " %s (qpos %d%s)", tWord.m_sWord.cstr(), tWord.m_iAtomPos, sLocTag );
}
printf ( "\n" );
}
}
#endif
CSphString sphReconstructNode ( const XQNode_t * pNode, const CSphSchema * pSchema )
{
CSphString sRes ( "" );
if ( !pNode )
return sRes;
if ( pNode->m_dWords.GetLength() )
{
// say just words to me
const CSphVector<XQKeyword_t> & dWords = pNode->m_dWords;
ARRAY_FOREACH ( i, dWords )
sRes.SetSprintf ( "%s %s", sRes.cstr(), dWords[i].m_sWord.cstr() );
sRes.Trim ();
switch ( pNode->GetOp() )
{
case SPH_QUERY_AND: break;
case SPH_QUERY_PHRASE: sRes.SetSprintf ( "\"%s\"", sRes.cstr() ); break;
case SPH_QUERY_PROXIMITY: sRes.SetSprintf ( "\"%s\"~%d", sRes.cstr(), pNode->m_iOpArg ); break;
case SPH_QUERY_QUORUM: sRes.SetSprintf ( "\"%s\"/%d", sRes.cstr(), pNode->m_iOpArg ); break;
case SPH_QUERY_NEAR: sRes.SetSprintf ( "\"%s\"NEAR/%d", sRes.cstr(), pNode->m_iOpArg ); break;
default: assert ( 0 && "unexpected op in ReconstructNode()" ); break;
}
if ( !pNode->m_dSpec.m_dFieldMask.TestAll(true) )
{
CSphString sFields ( "" );
for ( int i=0; i<SPH_MAX_FIELDS; i++ )
{
if ( !pNode->m_dSpec.m_dFieldMask.Test(i) )
continue;
if ( pSchema )
sFields.SetSprintf ( "%s,%s", sFields.cstr(), pSchema->GetFieldName(i) );
else
sFields.SetSprintf ( "%s,%u", sFields.cstr(), pNode->m_dSpec.m_dFieldMask.GetMask32() );
}
sRes.SetSprintf ( "( @%s: %s )", sFields.cstr()+1, sRes.cstr() );
} else
{
if ( pNode->GetOp()==SPH_QUERY_AND && dWords.GetLength()>1 )
sRes.SetSprintf ( "( %s )", sRes.cstr() ); // wrap bag of words
}
} else
{
ARRAY_FOREACH ( i, pNode->m_dChildren )
{
if ( !i )
{
sRes = sphReconstructNode ( pNode->m_dChildren[i], pSchema );
} else
{
const char * sOp = "(unknown-op)";
switch ( pNode->GetOp() )
{
case SPH_QUERY_AND: sOp = " "; break;
case SPH_QUERY_OR: sOp = "|"; break;
case SPH_QUERY_MAYBE: sOp = "MAYBE"; break;
case SPH_QUERY_NOT: sOp = "NOT"; break;
case SPH_QUERY_ANDNOT: sOp = "AND NOT"; break;
case SPH_QUERY_BEFORE: sOp = "BEFORE"; break;
case SPH_QUERY_NEAR: sOp = "NEAR"; break;
case SPH_QUERY_PHRASE: sOp = ""; break;
default: assert ( 0 && "unexpected op in ReconstructNode()" ); break;
}
if ( pNode->GetOp()==SPH_QUERY_PHRASE )
sRes.SetSprintf ( "\"%s %s\"", sRes.cstr(), sphReconstructNode ( pNode->m_dChildren[i], pSchema ).cstr() );
else
sRes.SetSprintf ( "%s %s %s", sRes.cstr(), sOp, sphReconstructNode ( pNode->m_dChildren[i], pSchema ).cstr() );
}
}
if ( pNode->m_dChildren.GetLength()>1 )
sRes.SetSprintf ( "( %s )", sRes.cstr() );
}
return sRes;
}
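// Illustration: an AND node holding the keywords "hello" and "world" with the
// field mask limited to the title field reconstructs to [ ( @title: hello world ) ],
// while a two-word PHRASE node with the full field mask comes back as ["hello world"].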
bool sphParseExtendedQuery ( XQQuery_t & tParsed, const char * sQuery, const CSphQuery * pQuery, const TokenizerRefPtr_c& pTokenizer, const CSphSchema * pSchema, const DictRefPtr_c& pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields )
{
XQParser_t qp;
bool bRes = qp.Parse ( tParsed, sQuery, pQuery, pTokenizer, pSchema, pDict, tSettings, pMorphFields );
#ifndef NDEBUG
if ( bRes && tParsed.m_pRoot )
tParsed.m_pRoot->Check ( true );
#endif
#ifdef XQDEBUG
if ( bRes )
{
printf ( "\n--- query ---\n" );
printf ( "%s\n", sQuery );
xqDump ( tParsed.m_pRoot, 0 );
printf ( "---\n" );
}
#endif
// moved here from ranker creation
// as at that point term expansion could produce many terms from expanded term and this condition got failed
tParsed.m_bSingleWord = ( tParsed.m_pRoot && tParsed.m_pRoot->m_dChildren.GetLength()==0 && tParsed.m_pRoot->m_dWords.GetLength()==1 );
tParsed.m_bEmpty = qp.m_bEmpty;
return bRes;
}
//////////////////////////////////////////////////////////////////////////
// COMMON SUBTREES DETECTION
//////////////////////////////////////////////////////////////////////////
/// Decides if the given pTree is appropriate for caching or not. Currently we don't cache
/// the end values (leaves).
static bool IsAppropriate ( const XQNode_t * pTree )
{
if ( !pTree ) return false;
// skip nodes that actually are leaves (eg. "AND smth" node instead of merely "smth")
return !( pTree->m_dWords.GetLength()==1 && pTree->GetOp()!=SPH_QUERY_NOT );
}
typedef CSphOrderedHash < DWORD, uint64_t, IdentityHash_fn, 128 > CDwordHash;
// stores the pair of a tree, and the bitmask of common nodes
// which contains the tree.
class BitMask_t
{
const XQNode_t * m_pTree = nullptr;
uint64_t m_uMask = 0ull;
public:
BitMask_t() = default;
BitMask_t ( const XQNode_t * pTree, uint64_t uMask )
{
m_pTree = pTree;
m_uMask = uMask;
}
inline uint64_t GetMask() const { return m_uMask; }
inline const XQNode_t * GetTree() const { return m_pTree; }
};
// a list of unique values.
class Associations_t : public CDwordHash
{
public:
// returns true exactly when the second member is added.
// A single member is not interesting to us,
// but more than two would flood the caller.
bool Associate2nd ( uint64_t uTree )
{
if ( Exists ( uTree ) )
return false;
Add ( 0, uTree );
return GetLength()==2;
}
// merge with another similar
void Merge ( const Associations_t& parents )
{
for ( const auto& tAssoc : parents )
Associate2nd ( tAssoc.first );
}
};
// associates a set of nodes with the common bitmask for these nodes,
// and provides operator < to compare different pairs
class BitAssociation_t
{
private:
const Associations_t * m_pAssociations;
mutable int m_iBits;
// The key method of subtree selection.
// Most 'heavy' subtrees will be extracted first.
inline int GetWeight() const
{
assert ( m_pAssociations );
int iNodes = m_pAssociations->GetLength();
if ( m_iBits==0 && m_uMask!=0 )
{
for ( uint64_t dMask = m_uMask; dMask; dMask >>=1 )
m_iBits += (int)( dMask & 1 );
}
// current working formula is num_nodes^2 * num_hits
return iNodes * iNodes * m_iBits;
}
public:
uint64_t m_uMask;
BitAssociation_t()
: m_pAssociations ( NULL )
, m_iBits ( 0 )
, m_uMask ( 0 )
{}
void Init ( uint64_t uMask, const Associations_t* dNodes )
{
m_uMask = uMask;
m_pAssociations = dNodes;
m_iBits = 0;
}
bool operator< (const BitAssociation_t& second) const
{
return GetWeight() < second.GetWeight();
}
};
// for pairs of values builds and stores the association "key -> list of values"
class CAssociations_t
: public CSphOrderedHash < Associations_t, uint64_t, IdentityHash_fn, 128 >
{
int m_iBits; // number of non-unique associations
public:
CAssociations_t() : m_iBits ( 0 ) {}
// Add the given pTree into the list of pTrees, associated with given uHash
int Associate ( const XQNode_t * pTree, uint64_t uHash )
{
if ( !Exists ( uHash ) )
Add ( Associations_t(), uHash );
if ( operator[]( uHash ).Associate2nd ( pTree->GetHash() ) )
++m_iBits;
return m_iBits;
}
// merge the existing association of uHash with given chain
void MergeAssociations ( const Associations_t & chain, uint64_t uHash )
{
if ( !Exists ( uHash ) )
Add ( chain, uHash );
else
operator[]( uHash ).Merge ( chain );
}
inline int GetSize() const { return m_iBits; }
};
// The main class for working with common subtrees
class RevealCommon_t : ISphNoncopyable
{
private:
static const int MAX_MULTINODES = 64;
CSphVector<BitMask_t> m_dBitmasks; // all bitmasks for all the nodes
CSphVector<uint64_t> m_dSubQueries; // final vector with roadmap for tree division.
CAssociations_t m_hNodes; // initial accumulator for nodes
CAssociations_t m_hInterSections; // initial accumulator for nodes
CDwordHash m_hBitOrders; // order numbers for found common subnodes
XQOperator_e m_eOp; // my operator which I process
private:
// returns the order for given uHash (if any).
inline int GetBitOrder ( uint64_t uHash ) const
{
if ( !m_hBitOrders.Exists ( uHash ) )
return -1;
return m_hBitOrders[uHash];
}
// recursively scans the whole tree and builds the maps
// which associate a list of parents with every "leaf" node (i.e. a node with children)
bool BuildAssociations ( XQNode_t * pTree )
{
if ( IsAppropriate ( pTree ) )
{
ARRAY_FOREACH ( i, pTree->m_dChildren )
if ( ( !BuildAssociations ( pTree->m_dChildren[i] ) )
|| ( ( m_eOp==pTree->GetOp() )
&& ( m_hNodes.Associate ( pTree, pTree->m_dChildren[i]->GetHash() )>=MAX_MULTINODES ) ) )
{
return false;
}
}
return true;
}
// Find all leaves that are non-unique across the tree,
// and associate an order number with each of them
bool CalcCommonNodes ()
{
if ( !m_hNodes.GetSize() )
return false; // there are no non-unique leaves at all
int iBit = 0;
for ( const auto& tNode : m_hNodes )
if ( tNode.second.GetLength() > 1 )
m_hBitOrders.Add ( iBit++, tNode.first );
assert ( m_hNodes.GetSize()==m_hBitOrders.GetLength() );
m_hNodes.Reset(); ///< we don't need this data anymore from now on
return true;
}
// recursively builds for every node the bitmasks
// of common nodes it has as children
void BuildBitmasks ( const XQNode_t * pTree )
{
if ( !IsAppropriate ( pTree ) )
return;
if ( m_eOp==pTree->GetOp() )
{
// calculate the bitmask
int iOrder;
uint64_t dMask = 0;
for ( const XQNode_t* pChild : pTree->m_dChildren )
{
iOrder = GetBitOrder ( pChild->GetHash() );
if ( iOrder>=0 )
dMask |= 1ull << iOrder;
}
// add the bitmask into the array
if ( dMask )
m_dBitmasks.Add ( { pTree, dMask } );
}
// recursively process all the children
for ( const XQNode_t* pChild : pTree->m_dChildren )
BuildBitmasks ( pChild );
}
// Collect all possible intersections of Bitmasks.
// For every non-zero intersection we collect the list of trees which contain it.
void CalcIntersections ()
{
// Round 1. Intersect all content of bitmasks one-by-one.
ARRAY_FOREACH ( i, m_dBitmasks )
for ( int j = i+1; j<m_dBitmasks.GetLength(); ++j )
{
// intersect one-by-one and group (grouping is done by nature of a hash)
uint64_t uMask = m_dBitmasks[i].GetMask() & m_dBitmasks[j].GetMask();
if ( uMask )
{
m_hInterSections.Associate ( m_dBitmasks[i].GetTree(), uMask );
m_hInterSections.Associate ( m_dBitmasks[j].GetTree(), uMask );
}
}
// Round 2. Intersect again all collected intersection one-by-one - until zero.
for ( auto pIt1 = m_hInterSections.begin(); pIt1 != CAssociations_t::end(); ++pIt1 )
{
auto pIt2 = pIt1;
for ( ++pIt2; pIt2 != CAssociations_t::end(); ++pIt2 )
{
assert ( pIt1->first != pIt2->first );
auto uMask = pIt1->first & pIt2->first;
if ( uMask )
{
m_hInterSections.MergeAssociations ( pIt1->second, uMask );
m_hInterSections.MergeAssociations ( pIt2->second, uMask );
}
}
}
}
// create the final kit of common-subsets
// which we will actually reveal (extract) from original trees
void MakeQueries()
{
CSphVector<BitAssociation_t> dSubnodes; // masks for our selected subnodes
dSubnodes.Reserve ( m_hInterSections.GetLength() );
for ( const auto& tInterSection : m_hInterSections )
dSubnodes.Add().Init( tInterSection.first, & tInterSection.second );
// sort by weight descending (the weight ordering is defined by operator <)
dSubnodes.RSort();
m_dSubQueries.Reset();
// make the final subtrees vector: take masks one-by-one from the beginning,
// intersect each with all the following ones and throw out zeros.
// The final subqueries will not intersect each other.
int j;
uint64_t uMask;
ARRAY_FOREACH ( i, dSubnodes )
{
uMask = dSubnodes[i].m_uMask;
m_dSubQueries.Add ( uMask );
j = i+1;
while ( j < dSubnodes.GetLength() )
{
if ( !( dSubnodes[j].m_uMask &= ~uMask ) )
dSubnodes.Remove(j);
else
j++;
}
}
}
// Now we finally extract the common subtrees from the original tree
// and (recursively) from its children
void Reorganize ( XQNode_t * pTree )
{
if ( !IsAppropriate ( pTree ) )
return;
if ( m_eOp==pTree->GetOp() )
{
// pBranch is for common subset of children, pOtherChildren is for the rest.
CSphOrderedHash < XQNode_t*, int, IdentityHash_fn, 64 > hBranches;
XQNode_t * pOtherChildren = nullptr;
int iBit;
int iOptimizations = 0;
ARRAY_FOREACH ( i, pTree->m_dChildren )
{
iBit = GetBitOrder ( pTree->m_dChildren[i]->GetHash() );
// works only with children which are actually common with somebody else
if ( iBit>=0 )
{
// since subqueries don't intersect each other,
// the first hit found in this loop is exactly what we searched for.
ARRAY_FOREACH ( j, m_dSubQueries )
if ( ( 1ull << iBit ) & m_dSubQueries[j] )
{
XQNode_t * pNode;
if ( !hBranches.Exists(j) )
{
pNode = new XQNode_t ( pTree->m_dSpec );
pNode->SetOp ( m_eOp, pTree->m_dChildren[i] );
hBranches.Add ( pNode, j );
} else
{
pNode = hBranches[j];
pNode->m_dChildren.Add ( pTree->m_dChildren[i] );
// Count essential subtrees (with at least 2 children)
if ( pNode->m_dChildren.GetLength()==2 )
++iOptimizations;
}
break;
}
// other nodes go into the set of "other" children
} else
{
if ( !pOtherChildren )
{
pOtherChildren = new XQNode_t ( pTree->m_dSpec );
pOtherChildren->SetOp ( m_eOp, pTree->m_dChildren[i] );
} else
pOtherChildren->m_dChildren.Add ( pTree->m_dChildren[i] );
}
}
// we don't reorganize the trivial explicit case: no "others" and only one common branch.
// Also reject the optimization if there is nothing to optimize.
if ( ( iOptimizations==0 )
|| ( !pOtherChildren && ( hBranches.GetLength()==1 ) ) )
{
if ( pOtherChildren )
pOtherChildren->m_dChildren.Reset();
for ( auto& tBranch : hBranches )
{
assert ( tBranch.second );
tBranch.second->m_dChildren.Reset();
SafeDelete ( tBranch.second );
}
} else
{
// reorganize the tree: replace the common subset with an explicit node
// that holds only the common members. This will make it possible
// to cache the node.
pTree->m_dChildren.Reset();
if ( pOtherChildren )
pTree->m_dChildren.SwapData ( pOtherChildren->m_dChildren );
for ( auto& tBranch : hBranches )
{
if ( tBranch.second->m_dChildren.GetLength()==1 )
{
pTree->m_dChildren.Add ( tBranch.second->m_dChildren[0] );
tBranch.second->m_dChildren.Reset();
SafeDelete ( tBranch.second );
} else
pTree->m_dChildren.Add ( tBranch.second );
}
}
SafeDelete ( pOtherChildren );
}
// recursively process all the children
for ( XQNode_t* pChild : pTree->m_dChildren )
Reorganize ( pChild );
}
public:
explicit RevealCommon_t ( XQOperator_e eOp )
: m_eOp ( eOp )
{}
// actual method for processing tree and reveal (extract) common subtrees
void Transform ( int iXQ, const XQQuery_t * pXQ )
{
// collect all non-unique nodes
for ( int i=0; i<iXQ; i++ )
if ( !BuildAssociations ( pXQ[i].m_pRoot ) )
return;
// count and order all non-unique nodes
if ( !CalcCommonNodes() )
return;
// create and collect bitmask for every node
for ( int i=0; i<iXQ; i++ )
BuildBitmasks ( pXQ[i].m_pRoot );
// intersect all bitmasks one-by-one, and also intersect all intersections
CalcIntersections();
// the die-hard: actually select the set of subtrees which we'll process
MakeQueries();
// ... and finally - process all our trees.
for ( int i=0; i<iXQ; i++ )
Reorganize ( pXQ[i].m_pRoot );
}
};
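// Illustration of the whole pass: given two trees that share the AND children
// { A, B } plus private parts, e.g. (A B C) and (A B D), Transform() detects
// { A, B } as a common subset via the bitmasks and Reorganize() rewrites the
// trees as ((A B) C) and ((A B) D), exposing a shared (A B) node which can
// then be flagged and cached as a common subtree.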
struct MarkedNode_t
{
const XQNode_t* m_pTree;
int m_iCounter = 1;
bool m_bMarked = false;
int m_iOrder = 0;
explicit MarkedNode_t ( const XQNode_t * pTree=nullptr )
: m_pTree ( pTree )
{}
inline void Mark()
{
++m_iCounter;
m_bMarked = true;
}
inline void Unmark()
{
if ( m_bMarked && m_iCounter>1 )
--m_iCounter;
if ( m_iCounter<2 )
m_bMarked = false;
}
};
typedef CSphOrderedHash < MarkedNode_t, uint64_t, IdentityHash_fn, 128 > CSubtreeHash;
struct XqTreeComparator_t
{
CSphVector<const XQKeyword_t *> m_dTerms1;
CSphVector<const XQKeyword_t *> m_dTerms2;
bool IsEqual ( const XQNode_t * pNode1, const XQNode_t * pNode2 );
bool CheckCollectTerms ( const XQNode_t * pNode1, const XQNode_t * pNode2 );
};
/// check hashes, then check subtrees, then flag
static void FlagCommonSubtrees ( const XQNode_t * pTree, CSubtreeHash & hSubTrees, XqTreeComparator_t & tCmp, bool bFlag, bool bMark = false )
{
if ( !IsAppropriate ( pTree ) )
return;
// we do not yet have any collisions stats,
// but chances are we don't actually need IsEqualTo() at all
uint64_t iHash = pTree->GetHash();
if ( bFlag && hSubTrees.Exists ( iHash ) && tCmp.IsEqual ( hSubTrees [ iHash ].m_pTree, pTree ) )
{
hSubTrees[iHash].Mark();
// we just add all the children but do NOT mark them as common
// so that only the subtree root is marked.
// also we unmark all the cases which were eaten by bigger trees
for ( const XQNode_t * pChild : pTree->m_dChildren )
if ( !hSubTrees.Exists ( pChild->GetHash() ) )
FlagCommonSubtrees ( pChild, hSubTrees, tCmp, false, bMark );
else
FlagCommonSubtrees ( pChild, hSubTrees, tCmp, false );
} else
{
if ( bMark )
hSubTrees.Add ( MarkedNode_t ( pTree ), iHash );
else
hSubTrees[iHash].Unmark();
for ( const XQNode_t* pChild : pTree->m_dChildren )
FlagCommonSubtrees ( pChild, hSubTrees, tCmp, bFlag, bMark );
}
}
static void SignCommonSubtrees ( XQNode_t * pTree, const CSubtreeHash & hSubTrees )
{
if ( !pTree )
return;
uint64_t iHash = pTree->GetHash();
const MarkedNode_t * pCommon = hSubTrees ( iHash );
if ( pCommon && pCommon->m_bMarked )
pTree->TagAsCommon ( pCommon->m_iOrder, pCommon->m_iCounter );
for ( XQNode_t* pChild : pTree->m_dChildren )
SignCommonSubtrees ( pChild, hSubTrees );
}
int sphMarkCommonSubtrees ( int iXQ, const XQQuery_t * pXQ )
{
if ( iXQ<=0 || !pXQ )
return 0;
{ // Optional reorganize tree to extract common parts
RevealCommon_t ( SPH_QUERY_AND ).Transform ( iXQ, pXQ );
RevealCommon_t ( SPH_QUERY_OR ).Transform ( iXQ, pXQ );
}
// flag common subtrees and refcount them
XqTreeComparator_t tCmp;
CSubtreeHash hSubtrees;
for ( int i=0; i<iXQ; i++ )
FlagCommonSubtrees ( pXQ[i].m_pRoot, hSubtrees, tCmp, true, true );
// number marked subtrees and assign them order numbers.
int iOrder = 0;
for ( auto& tSubtree : hSubtrees )
if ( tSubtree.second.m_bMarked )
tSubtree.second.m_iOrder = iOrder++;
// copy the flags and orders to original trees
for ( int i=0; i<iXQ; i++ )
SignCommonSubtrees ( pXQ[i].m_pRoot, hSubtrees );
return iOrder;
}
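// Illustration: running sphMarkCommonSubtrees() over two parsed queries that
// both contain the subtree (foo bar) tags that subtree in each tree via
// TagAsCommon() with the same order number and returns 1 (the number of
// distinct marked subtrees), so the evaluation result can be computed once
// and reused.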
XQQuery_t * CloneXQQuery ( const XQQuery_t & tQuery )
{
XQQuery_t * pQuery = new XQQuery_t;
pQuery->m_dZones = tQuery.m_dZones;
pQuery->m_bNeedSZlist = tQuery.m_bNeedSZlist;
pQuery->m_bSingleWord = tQuery.m_bSingleWord;
pQuery->m_bEmpty = tQuery.m_bEmpty;
pQuery->m_pRoot = tQuery.m_pRoot ? tQuery.m_pRoot->Clone() : nullptr;
return pQuery;
}
bool XqTreeComparator_t::IsEqual ( const XQNode_t * pNode1, const XQNode_t * pNode2 )
{
// early out check to skip allocations
if ( !pNode1 || !pNode2 || pNode1->GetHash()!=pNode2->GetHash() || pNode1->GetOp()!=pNode2->GetOp () )
return false;
// reset data from the previous compare
m_dTerms1.Resize ( 0 );
m_dTerms2.Resize ( 0 );
// reserve some space for the first compare
m_dTerms1.Reserve ( 64 );
m_dTerms2.Reserve ( 64 );
if ( !CheckCollectTerms ( pNode1, pNode2 ) )
return false;
assert ( m_dTerms1.GetLength ()==m_dTerms2.GetLength () );
if ( !m_dTerms1.GetLength() )
return true;
m_dTerms1.Sort ( Lesser ( [] ( const auto& l, const auto& r ) { return l->m_iAtomPos < r->m_iAtomPos; } ) );
m_dTerms2.Sort ( Lesser ( [] ( const auto& l, const auto& r ) { return l->m_iAtomPos < r->m_iAtomPos; } ) );
if ( m_dTerms1[0]->m_sWord!=m_dTerms2[0]->m_sWord )
return false;
for ( int i=1; i<m_dTerms1.GetLength(); i++ )
{
int iDelta1 = m_dTerms1[i]->m_iAtomPos - m_dTerms1[i-1]->m_iAtomPos;
int iDelta2 = m_dTerms2[i]->m_iAtomPos - m_dTerms2[i-1]->m_iAtomPos;
if ( iDelta1!=iDelta2 || m_dTerms1[i]->m_sWord!=m_dTerms2[i]->m_sWord )
return false;
}
return true;
}
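// Illustration: the phrase nodes ["cat dog"] at atom positions (3,4) and
// (7,8) compare equal here; the keyword lists match and the position deltas
// (4-3 vs 8-7) are the same, so absolute qpos offsets do not matter.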
bool XqTreeComparator_t::CheckCollectTerms ( const XQNode_t * pNode1, const XQNode_t * pNode2 )
{
// early out
if ( !pNode1 || !pNode2
|| pNode1->GetHash ()!=pNode2->GetHash () || pNode1->GetOp ()!=pNode2->GetOp ()
|| pNode1->m_dWords.GetLength ()!=pNode2->m_dWords.GetLength ()
|| pNode1->m_dChildren.GetLength ()!=pNode2->m_dChildren.GetLength () )
return false;
// for plain nodes compare keywords
ARRAY_FOREACH ( i, pNode1->m_dWords )
m_dTerms1.Add ( pNode1->m_dWords.Begin() + i );
ARRAY_FOREACH ( i, pNode2->m_dWords )
m_dTerms2.Add ( pNode2->m_dWords.Begin () + i );
// for non-plain nodes compare children
ARRAY_FOREACH ( i, pNode1->m_dChildren )
{
if ( !CheckCollectTerms ( pNode1->m_dChildren[i], pNode2->m_dChildren[i] ) )
return false;
}
return true;
}
class CSphTransformation : public ISphNoncopyable
{
public:
CSphTransformation ( XQNode_t ** ppRoot, const ISphKeywordsStat * pKeywords );
void Transform ();
inline void Dump ( const XQNode_t * pNode, const char * sHeader = "" );
private:
typedef CSphOrderedHash < CSphVector<XQNode_t*>, uint64_t, IdentityHash_fn, 32> HashSimilar_t;
CSphOrderedHash < HashSimilar_t, uint64_t, IdentityHash_fn, 256 > m_hSimilar;
CSphVector<XQNode_t *> m_dRelatedNodes;
const ISphKeywordsStat * m_pKeywords;
XQNode_t ** m_ppRoot;
typedef bool ( *Checker_fn ) ( const XQNode_t * );
private:
void Dump ();
void SetCosts ( XQNode_t * pNode, const CSphVector<XQNode_t *> & dNodes );
int GetWeakestIndex ( const CSphVector<XQNode_t *> & dNodes );
template < typename Group, typename SubGroup >
inline void TreeCollectInfo ( XQNode_t * pParent, Checker_fn pfnChecker );
template < typename Group, typename SubGroup >
inline bool CollectInfo ( XQNode_t * pParent, Checker_fn pfnChecker );
template < typename Excluder, typename Parenter >
inline bool CollectRelatedNodes ( const CSphVector<XQNode_t *> & dSimilarNodes );
// ((A !N) | (B !N)) -> ((A|B) !N)
static bool CheckCommonNot ( const XQNode_t * pNode );
bool TransformCommonNot ();
bool MakeTransformCommonNot ( CSphVector<XQNode_t *> & dSimilarNodes );
// ((A !(N AA)) | (B !(N BB))) -> (((A|B) !N) | (A !AA) | (B !BB)) [ if cost(N) > cost(A) + cost(B) ]
static bool CheckCommonCompoundNot ( const XQNode_t * pNode );
bool TransformCommonCompoundNot ();
bool MakeTransformCommonCompoundNot ( CSphVector<XQNode_t *> & dSimilarNodes );
// ((A (X | AA)) | (B (X | BB))) -> (((A|B) X) | (A AA) | (B BB)) [ if cost(X) > cost(A) + cost(B) ]
static bool CheckCommonSubTerm ( const XQNode_t * pNode );
bool TransformCommonSubTerm ();
void MakeTransformCommonSubTerm ( CSphVector<XQNode_t *> & dX );
// (A | "A B"~N) -> A ; ("A B" | "A B C") -> "A B" ; ("A B"~N | "A B C"~N) -> ("A B"~N)
static bool CheckCommonKeywords ( const XQNode_t * pNode );
bool TransformCommonKeywords ();
// ("X A B" | "Y A B") -> (("X|Y") "A B")
// ("A B X" | "A B Y") -> (("X|Y") "A B")
static bool CheckCommonPhrase ( const XQNode_t * pNode );
bool TransformCommonPhrase ();
void MakeTransformCommonPhrase ( CSphVector<XQNode_t *> & dCommonNodes, int iCommonLen, bool bHeadIsCommon );
// ((A !X) | (A !Y) | (A !Z)) -> (A !(X Y Z))
static bool CheckCommonAndNotFactor ( const XQNode_t * pNode );
bool TransformCommonAndNotFactor ();
bool MakeTransformCommonAndNotFactor ( CSphVector<XQNode_t *> & dSimilarNodes );
// ((A !(N | N1)) | (B !(N | N2))) -> (( (A !N1) | (B !N2) ) !N)
static bool CheckCommonOrNot ( const XQNode_t * pNode );
bool TransformCommonOrNot ();
bool MakeTransformCommonOrNot ( CSphVector<XQNode_t *> & dSimilarNodes );
// The main goal of the transformations below is tree clarification and
// further application of the standard transformations above.
// A "hung" operand ( an AND(OR) node with only 1 child ) appears after an internal transformation
static bool CheckHungOperand ( const XQNode_t * pNode );
bool TransformHungOperand ();
// ((A | B) | C) -> ( A | B | C )
// ((A B) C) -> ( A B C )
static bool CheckExcessBrackets ( const XQNode_t * pNode );
bool TransformExcessBrackets ();
// ((A !N1) !N2) -> (A !(N1 | N2))
static bool CheckExcessAndNot ( const XQNode_t * pNode );
bool TransformExcessAndNot ();
private:
static const uint64_t CONST_GROUP_FACTOR;
struct NullNode
{
static inline uint64_t By ( XQNode_t * ) { return CONST_GROUP_FACTOR; } // NOLINT
static inline const XQNode_t * From ( const XQNode_t * ) { return NULL; } // NOLINT
};
struct CurrentNode
{
static inline uint64_t By ( XQNode_t * p ) { return p->GetFuzzyHash(); }
static inline const XQNode_t * From ( const XQNode_t * p ) { return p; }
};
struct ParentNode
{
static inline uint64_t By ( XQNode_t * p ) { return p->m_pParent->GetFuzzyHash(); }
static inline const XQNode_t * From ( const XQNode_t * p ) { return p->m_pParent; }
};
struct GrandNode
{
static inline uint64_t By ( XQNode_t * p ) { return p->m_pParent->m_pParent->GetFuzzyHash(); }
static inline const XQNode_t * From ( const XQNode_t * p ) { return p->m_pParent->m_pParent; }
};
struct Grand2Node {
static inline uint64_t By ( XQNode_t * p ) { return p->m_pParent->m_pParent->m_pParent->GetFuzzyHash(); }
static inline const XQNode_t * From ( const XQNode_t * p ) { return p->m_pParent->m_pParent->m_pParent; }
};
struct Grand3Node
{
static inline uint64_t By ( XQNode_t * p ) { return p->m_pParent->m_pParent->m_pParent->m_pParent->GetFuzzyHash(); }
static inline const XQNode_t * From ( const XQNode_t * p ) { return p->m_pParent->m_pParent->m_pParent->m_pParent; }
};
};
CSphTransformation::CSphTransformation ( XQNode_t ** ppRoot, const ISphKeywordsStat * pKeywords )
: m_pKeywords ( pKeywords )
, m_ppRoot ( ppRoot )
{
assert ( m_pKeywords!=NULL );
}
const uint64_t CSphTransformation::CONST_GROUP_FACTOR = 0;
template < typename Group, typename SubGroup >
void CSphTransformation::TreeCollectInfo ( XQNode_t * pParent, Checker_fn pfnChecker )
{
if ( pParent )
{
if ( pfnChecker ( pParent ) )
{
// "Similar nodes" are nodes which are suited to a template (like 'COMMON NOT', 'COMMON COMPOND NOT', ...)
uint64_t uGroup = (uint64_t)Group::From ( pParent );
uint64_t uSubGroup = SubGroup::By ( pParent );
HashSimilar_t & hGroup = m_hSimilar.AddUnique ( uGroup );
hGroup.AddUnique ( uSubGroup ).Add ( pParent );
}
ARRAY_FOREACH ( iChild, pParent->m_dChildren )
TreeCollectInfo<Group, SubGroup> ( pParent->m_dChildren[iChild], pfnChecker );
}
}
template < typename Group, typename SubGroup >
bool CSphTransformation::CollectInfo ( XQNode_t * pParent, Checker_fn pfnChecker )
{
( *m_ppRoot )->Check ( true );
m_hSimilar.Reset();
TreeCollectInfo<Group, SubGroup> ( pParent, pfnChecker );
return ( m_hSimilar.GetLength()>0 );
}
void CSphTransformation::SetCosts ( XQNode_t * pNode, const CSphVector<XQNode_t *> & dNodes )
{
assert ( pNode || dNodes.GetLength() );
CSphVector<XQNode_t*> dChildren ( dNodes.GetLength() + 1 );
dChildren[dNodes.GetLength()] = pNode;
ARRAY_FOREACH ( i, dNodes )
{
dChildren[i] = dNodes[i];
dChildren[i]->m_iUser = 0;
}
// collect unknown keywords from all children
CSphVector<CSphKeywordInfo> dKeywords;
SmallStringHash_T<int> hCosts;
ARRAY_FOREACH ( i, dChildren )
{
XQNode_t * pChild = dChildren[i];
ARRAY_FOREACH ( j, pChild->m_dChildren )
{
dChildren.Add ( pChild->m_dChildren[j] );
dChildren.Last()->m_iUser = 0;
assert ( dChildren.Last()->m_pParent==pChild );
}
ARRAY_FOREACH ( j, pChild->m_dWords )
{
const CSphString & sWord = pChild->m_dWords[j].m_sWord;
int * pCost = hCosts ( sWord );
if ( !pCost )
{
Verify ( hCosts.Add ( 0, sWord ) );
dKeywords.Add();
dKeywords.Last().m_sTokenized = sWord;
dKeywords.Last().m_iDocs = 0;
}
}
}
// get keywords info from index dictionary
if ( dKeywords.GetLength() )
{
m_pKeywords->FillKeywords ( dKeywords );
ARRAY_FOREACH ( i, dKeywords )
{
const CSphKeywordInfo & tKeyword = dKeywords[i];
hCosts[tKeyword.m_sTokenized] = tKeyword.m_iDocs;
}
}
// propagate cost bottom-up (from children to parents)
for ( int i=dChildren.GetLength()-1; i>=0; i-- )
{
XQNode_t * pChild = dChildren[i];
int iCost = 0;
ARRAY_FOREACH ( j, pChild->m_dWords )
iCost += hCosts [ pChild->m_dWords[j].m_sWord ];
pChild->m_iUser += iCost;
if ( pChild->m_pParent )
pChild->m_pParent->m_iUser += pChild->m_iUser;
}
}
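// Illustration: if "foo" occurs in 1000 documents and "bar" in 10, the node
// holding "foo" gets m_iUser=1000, the "bar" node gets 10, and their common
// parent accumulates 1010 as the costs propagate bottom-up; the
// transformations below compare such sums to decide whether factoring out a
// common part actually pays off.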
template < typename Excluder, typename Parenter >
bool CSphTransformation::CollectRelatedNodes ( const CSphVector<XQNode_t *> & dSimilarNodes )
{
m_dRelatedNodes.Resize ( 0 );
ARRAY_FOREACH ( i, dSimilarNodes )
{
// Eval node that should be excluded
const XQNode_t * pExclude = Excluder::From ( dSimilarNodes[i] );
// Eval node that points to related nodes
const XQNode_t * pParent = Parenter::From ( dSimilarNodes[i] );
assert ( &pParent->m_dChildren!=&m_dRelatedNodes );
ARRAY_FOREACH ( j, pParent->m_dChildren )
{
if ( pParent->m_dChildren[j]!=pExclude )
m_dRelatedNodes.Add ( pParent->m_dChildren[j] );
}
}
return ( m_dRelatedNodes.GetLength()>1 );
}
bool CSphTransformation::CheckCommonNot ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || !pNode->m_pParent->m_pParent || !pNode->m_pParent->m_pParent->m_pParent ||
pNode->m_pParent->GetOp()!=SPH_QUERY_NOT || pNode->m_pParent->m_pParent->GetOp()!=SPH_QUERY_ANDNOT ||
pNode->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_OR )
{
//
// NOLINT // NOT:
// NOLINT // _______ OR (gGOr) ___________
// NOLINT // / | |
// NOLINT // ... AND NOT (grandAndNot) ...
// NOLINT // / |
// NOLINT // relatedNode NOT (parentNot)
// NOLINT // |
// NOLINT // pNode
//
return false;
}
return true;
}
bool CSphTransformation::TransformCommonNot ()
{
bool bRecollect = false;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dSimilarNodes = tSimilar.second;
if ( dSimilarNodes.GetLength()<2 )
continue;
if ( CollectRelatedNodes < ParentNode, GrandNode > ( dSimilarNodes ) && MakeTransformCommonNot ( dSimilarNodes ) )
{
bRecollect = true;
// Don't make transformation for other nodes from the same OR-node,
// because query tree was changed and further transformations
// might be invalid.
break;
}
}
}
return bRecollect;
}
int CSphTransformation::GetWeakestIndex ( const CSphVector<XQNode_t *> & dNodes )
{
// Returns the index of the weakest node among the equal ones.
// The example of equal nodes:
// "aaa bbb" (PHRASE), "aaa bbb"~10 (PROXIMITY), "aaa bbb"~20 (PROXIMITY)
// Such nodes have the same magic hash value.
// The weakest is "aaa bbb"~20
int iWeakestIndex = 0;
int iProximity = -1;
ARRAY_FOREACH ( i, dNodes )
{
XQNode_t * pNode = dNodes[i];
if ( pNode->GetOp()==SPH_QUERY_PROXIMITY && pNode->m_iOpArg>iProximity )
{
iProximity = pNode->m_iOpArg;
iWeakestIndex = i;
}
}
return iWeakestIndex;
}
bool CSphTransformation::MakeTransformCommonNot ( CSphVector<XQNode_t *> & dSimilarNodes )
{
// Pick the weakest node among the equal ones.
// PROXIMITY and PHRASE nodes with the same keywords have an equal magic hash,
// so they are considered equal nodes.
int iWeakestIndex = GetWeakestIndex ( dSimilarNodes );
// the weakest node is new parent of transformed expression
XQNode_t * pWeakestAndNot = m_dRelatedNodes[iWeakestIndex]->m_pParent;
assert ( pWeakestAndNot->m_dChildren[0]==m_dRelatedNodes[iWeakestIndex] );
XQNode_t * pCommonOr = pWeakestAndNot->m_pParent;
assert ( pCommonOr->GetOp()==SPH_QUERY_OR && pCommonOr->m_dChildren.Contains ( pWeakestAndNot ) );
XQNode_t * pGrandCommonOr = pCommonOr->m_pParent;
bool bKeepOr = ( pCommonOr->m_dChildren.GetLength()>2 );
// reset ownership of related nodes
ARRAY_FOREACH ( i, m_dRelatedNodes )
{
XQNode_t * pAnd = m_dRelatedNodes[i];
XQNode_t * pAndNot = pAnd->m_pParent;
assert ( pAndNot->m_pParent==pCommonOr );
if ( i!=iWeakestIndex )
{
Verify ( pAndNot->m_dChildren.RemoveValue ( pAnd ) );
if ( bKeepOr )
{
pCommonOr->m_dChildren.RemoveValue ( pAndNot );
SafeDelete ( pAndNot );
}
}
}
// move all related to new OR
XQNode_t * pHubOr = new XQNode_t ( XQLimitSpec_t() );
pHubOr->SetOp ( SPH_QUERY_OR, m_dRelatedNodes );
// insert hub OR via hub AND to new parent ( AND NOT )
XQNode_t * pHubAnd = new XQNode_t ( XQLimitSpec_t() );
pHubAnd->SetOp ( SPH_QUERY_AND, pHubOr );
// replace the old AND (child 0) at the new parent ( AND NOT ); it is already among the hub OR children
pHubAnd->m_pParent = pWeakestAndNot;
pWeakestAndNot->m_dChildren[0] = pHubAnd;
// in case common OR had only 2 children
if ( !bKeepOr )
{
// replace old OR with AND_NOT at parent
if ( !pGrandCommonOr )
{
pWeakestAndNot->m_pParent = NULL;
*m_ppRoot = pWeakestAndNot;
} else
{
pWeakestAndNot->m_pParent = pGrandCommonOr;
CSphVector<XQNode_t *> & dChildren = pGrandCommonOr->m_dChildren;
ARRAY_FOREACH ( i, dChildren )
{
if ( dChildren[i]==pCommonOr )
{
dChildren[i] = pWeakestAndNot;
break;
}
}
}
// remove the new parent ( AND NOT ) from the OR children
Verify ( pCommonOr->m_dChildren.RemoveValue ( pWeakestAndNot ) );
// free OR and all children
SafeDelete ( pCommonOr );
}
return true;
}
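// Worked example: the query [ (A !N) | (B !N) ] arrives as
// OR( ANDNOT(A,NOT(N)), ANDNOT(B,NOT(N)) ). The two N nodes hash alike, A and
// B become the related nodes, and the rewrite produces
// ANDNOT( AND( OR(A,B) ), NOT(N) ), i.e. ((A|B) !N), deleting the redundant
// ANDNOT branch and, when the common OR had only two children, the emptied
// OR itself.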
bool CSphTransformation::CheckCommonCompoundNot ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || !pNode->m_pParent->m_pParent || !pNode->m_pParent->m_pParent->m_pParent ||
!pNode->m_pParent->m_pParent->m_pParent->m_pParent || pNode->m_pParent->GetOp()!=SPH_QUERY_AND ||
pNode->m_pParent->m_pParent->GetOp()!=SPH_QUERY_NOT || pNode->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_ANDNOT ||
pNode->m_pParent->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_OR )
{
//
// NOLINT // NOT:
// NOLINT // __ OR (Grand3 = CommonOr) __
// NOLINT // / | |
// NOLINT // ... AND NOT (Grand2) ...
// NOLINT // / |
// NOLINT // relatedNode NOT (grandNot)
// NOLINT // |
// NOLINT // AND (parentAnd)
// NOLINT // / |
// NOLINT // pNode ...
//
return false;
}
return true;
}
bool CSphTransformation::TransformCommonCompoundNot ()
{
bool bRecollect = false;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dSimilarNodes = tSimilar.second;
if ( dSimilarNodes.GetLength()<2 )
continue;
if ( CollectRelatedNodes < GrandNode, Grand2Node > ( dSimilarNodes ) )
{
// Load the cost of the first node from the group
// of common nodes. The costs of the nodes in
// TransformableNodes are all the same.
SetCosts ( dSimilarNodes[0], m_dRelatedNodes );
int iCommon = dSimilarNodes[0]->m_iUser;
int iRelated = 0;
ARRAY_FOREACH ( i, m_dRelatedNodes )
iRelated += m_dRelatedNodes[i]->m_iUser;
// Check that the optimization will be useful.
if ( iCommon>iRelated && MakeTransformCommonCompoundNot ( dSimilarNodes ) )
{
bRecollect = true;
// Don't make transformation for other nodes from the same OR-node,
// because the query tree was changed and further transformations
// might be invalid.
break;
}
}
}
}
return bRecollect;
}
bool CSphTransformation::MakeTransformCommonCompoundNot ( CSphVector<XQNode_t *> & dSimilarNodes )
{
// Pick the weakest node among the equal ones.
// PROXIMITY and PHRASE nodes with the same keywords have an equal magic hash,
// so they are considered equal nodes.
int iWeakestIndex = GetWeakestIndex ( dSimilarNodes );
assert ( iWeakestIndex!=-1 );
XQNode_t * pWeakestSimilar = dSimilarNodes [ iWeakestIndex ];
// Common OR node (that is Grand3Node::From)
XQNode_t * pCommonOr = pWeakestSimilar->m_pParent->m_pParent->m_pParent->m_pParent;
// Factor out and delete/unlink similar nodes ( except weakest )
ARRAY_FOREACH ( i, dSimilarNodes )
{
XQNode_t * pParent = dSimilarNodes[i]->m_pParent;
Verify ( pParent->m_dChildren.RemoveValue ( dSimilarNodes[i] ) );
if ( i!=iWeakestIndex )
SafeDelete ( dSimilarNodes[i] );
}
// Create yet another ANDNOT node
// with related nodes and one common node
XQNode_t * pNewNot = new XQNode_t ( XQLimitSpec_t() );
pNewNot->SetOp ( SPH_QUERY_NOT, pWeakestSimilar );
XQNode_t * pNewOr = new XQNode_t ( XQLimitSpec_t() );
pNewOr->SetOp ( SPH_QUERY_OR );
pNewOr->m_dChildren.Resize ( m_dRelatedNodes.GetLength() );
ARRAY_FOREACH ( i, m_dRelatedNodes )
{
// ANDNOT operation implies AND and NOT nodes.
// The related nodes point to AND node that has one child node.
assert ( m_dRelatedNodes[i]->m_dChildren.GetLength()==1 );
pNewOr->m_dChildren[i] = m_dRelatedNodes[i]->m_dChildren[0]->Clone();
pNewOr->m_dChildren[i]->m_pParent = pNewOr;
}
XQNode_t * pNewAnd = new XQNode_t ( XQLimitSpec_t() );
pNewAnd->SetOp ( SPH_QUERY_AND, pNewOr );
XQNode_t * pNewAndNot = new XQNode_t ( XQLimitSpec_t() );
pNewAndNot->SetOp ( SPH_QUERY_ANDNOT, pNewAnd, pNewNot );
pCommonOr->m_dChildren.Add ( pNewAndNot );
pNewAndNot->m_pParent = pCommonOr;
return true;
}
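// Worked example: in [ (A !(N AA)) | (B !(N BB)) ] the common N is factored
// out only when cost(N) > cost(A)+cost(B), e.g. docs(N)=10000, docs(A)=50,
// docs(B)=70. The original branches are reduced to (A !AA) and (B !BB), and a
// fresh ((A|B) !N) ANDNOT is appended under the common OR, matching
// (((A|B) !N) | (A !AA) | (B !BB)).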
bool CSphTransformation::CheckCommonSubTerm ( const XQNode_t * pNode )
{
if ( !pNode || ( pNode->GetOp()==SPH_QUERY_PHRASE && pNode->m_dChildren.GetLength() )
|| !pNode->m_pParent || !pNode->m_pParent->m_pParent || !pNode->m_pParent->m_pParent->m_pParent ||
pNode->m_pParent->GetOp()!=SPH_QUERY_OR || pNode->m_pParent->m_pParent->GetOp()!=SPH_QUERY_AND ||
pNode->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_OR )
{
//
// NOLINT // NOT:
// NOLINT // ________OR (gGOr)
// NOLINT // / |
// NOLINT // ...... AND (grandAnd)
// NOLINT // / |
// NOLINT // relatedNode OR (parentOr)
// NOLINT // / |
// NOLINT // pNode ...
//
return false;
}
return true;
}
bool CSphTransformation::TransformCommonSubTerm ()
{
bool bRecollect = false;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dX = tSimilar.second;
if ( dX.GetLength()<2 )
continue;
// skip common sub-terms from same tree
bool bSame = false;
for ( int i=0; i<dX.GetLength()-1 && !bSame; i++ )
{
for ( int j=i+1; j<dX.GetLength() && !bSame; j++ )
bSame = ( dX[i]->m_pParent==dX[j]->m_pParent );
}
if ( bSame )
continue;
if ( CollectRelatedNodes < ParentNode, GrandNode > ( dX ) )
{
// Load the cost of the first node from the group
// of common nodes. The costs of the nodes in
// TransformableNodes are all the same.
SetCosts ( dX[0], m_dRelatedNodes );
int iCostCommonSubTermNode = dX[0]->m_iUser;
int iCostRelatedNodes = 0;
ARRAY_FOREACH ( i, m_dRelatedNodes )
iCostRelatedNodes += m_dRelatedNodes[i]->m_iUser;
// Check that optimization will be useful.
if ( iCostCommonSubTermNode > iCostRelatedNodes )
{
MakeTransformCommonSubTerm ( dX );
bRecollect = true;
// Don't make transformation for other nodes from the same OR-node,
// because query tree was changed and further transformations
// might be invalid.
break;
}
}
}
}
return bRecollect;
}
// remove nodes without children up the tree
static bool SubtreeRemoveEmpty ( XQNode_t * pNode )
{
if ( !pNode->IsEmpty() )
return false;
// climb up
XQNode_t * pParent = pNode->m_pParent;
while ( pParent && pParent->m_dChildren.GetLength()<=1 && !pParent->m_dWords.GetLength() )
{
pNode = pParent;
pParent = pParent->m_pParent;
}
if ( pParent )
pParent->m_dChildren.RemoveValue ( pNode );
// free subtree
SafeDelete ( pNode );
return true;
}
// eliminate composite ( AND / OR ) nodes with only one child
static void CompositeFixup ( XQNode_t * pNode, XQNode_t ** ppRoot )
{
assert ( pNode && !pNode->m_dWords.GetLength() );
if ( pNode->m_dChildren.GetLength()!=1 || !( pNode->GetOp()==SPH_QUERY_OR || pNode->GetOp()==SPH_QUERY_AND ) )
return;
XQNode_t * pChild = pNode->m_dChildren[0];
pChild->m_pParent = NULL;
pNode->m_dChildren.Resize ( 0 );
// climb up
XQNode_t * pParent = pNode->m_pParent;
while ( pParent && pParent->m_dChildren.GetLength()==1 && !pParent->m_dWords.GetLength() &&
( pParent->GetOp()==SPH_QUERY_OR || pParent->GetOp()==SPH_QUERY_AND ) )
{
pNode = pParent;
pParent = pParent->m_pParent;
}
if ( pParent )
{
ARRAY_FOREACH ( i, pParent->m_dChildren )
{
if ( pParent->m_dChildren[i]!=pNode )
continue;
pParent->m_dChildren[i] = pChild;
pChild->m_pParent = pParent;
break;
}
} else
{
*ppRoot = pChild;
}
// free subtree
SafeDelete ( pNode );
}
static void CleanupSubtree ( XQNode_t * pNode, XQNode_t ** ppRoot )
{
if ( SubtreeRemoveEmpty ( pNode ) )
return;
CompositeFixup ( pNode, ppRoot );
}
void CSphTransformation::MakeTransformCommonSubTerm ( CSphVector<XQNode_t *> & dX )
{
// Pick the weakest node among the equal ones.
// PROXIMITY and PHRASE nodes with the same keywords have an equal magic hash,
// so they are considered equal nodes.
int iWeakestIndex = GetWeakestIndex ( dX );
XQNode_t * pX = dX[iWeakestIndex];
// common parents of X and AA / BB need to be excluded
CSphVector<XQNode_t *> dExcluded ( dX.GetLength() );
// Factor out and delete/unlink similar nodes ( except weakest )
ARRAY_FOREACH ( i, dX )
{
XQNode_t * pParent = dX[i]->m_pParent;
Verify ( pParent->m_dChildren.RemoveValue ( dX[i] ) );
if ( i!=iWeakestIndex )
SafeDelete ( dX[i] );
dExcluded[i] = pParent;
pParent->m_pParent->m_dChildren.RemoveValue ( pParent );
}
CSphVector<XQNode_t *> dRelatedParents;
for ( int i=0; i<m_dRelatedNodes.GetLength(); i++ )
{
XQNode_t * pParent = m_dRelatedNodes[i]->m_pParent;
if ( !dRelatedParents.Contains ( pParent ) )
dRelatedParents.Add ( pParent );
}
ARRAY_FOREACH ( i, dRelatedParents )
dRelatedParents[i] = dRelatedParents[i]->Clone();
// push excluded children back
ARRAY_FOREACH ( i, dExcluded )
{
XQNode_t * pChild = dExcluded[i];
pChild->m_pParent->m_dChildren.Add ( pChild );
}
XQNode_t * pNewOr = new XQNode_t ( XQLimitSpec_t() );
pNewOr->SetOp ( SPH_QUERY_OR, dRelatedParents );
// Create yet another AND node
// with related nodes and one common dSimilar node
XQNode_t * pCommonOr = pX->m_pParent->m_pParent->m_pParent;
XQNode_t * pNewAnd = new XQNode_t ( XQLimitSpec_t() );
pNewAnd->SetOp ( SPH_QUERY_AND, pNewOr, pX );
pCommonOr->m_dChildren.Add ( pNewAnd );
pNewAnd->m_pParent = pCommonOr;
ARRAY_FOREACH ( i, dExcluded )
{
CleanupSubtree ( dExcluded[i], m_ppRoot );
}
}
bool CSphTransformation::CheckCommonKeywords ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || pNode->m_pParent->GetOp()!=SPH_QUERY_OR || !pNode->m_dWords.GetLength() )
{
//
// NOLINT // NOT:
// NOLINT // ______________________ OR (parentOr) _______
// NOLINT // / | |
// NOLINT // pNode (PHRASE|AND|PROXIMITY) ... ...
//
return false;
}
return true;
}
typedef CSphOrderedHash<CSphVector<XQNode_t *>, uint64_t, IdentityHash_fn, 128> BigramHash_t;
static int sphBigramAddNode ( XQNode_t * pNode, int64_t uHash, BigramHash_t & hBigram )
{
CSphVector<XQNode_t *> * ppNodes = hBigram ( uHash );
if ( !ppNodes )
{
CSphVector<XQNode_t *> dNode ( 1 );
dNode[0] = pNode;
hBigram.Add ( dNode, uHash );
return 1;
} else
{
(*ppNodes).Add ( pNode );
return (*ppNodes).GetLength();
}
}
static const BYTE g_sPhraseDelimiter[] = { 1 };
static uint64_t sphHashPhrase ( const XQNode_t * pNode )
{
assert ( pNode );
uint64_t uHash = SPH_FNV64_SEED;
ARRAY_FOREACH ( i, pNode->m_dWords )
{
if ( i )
uHash = sphFNV64 ( g_sPhraseDelimiter, sizeof(g_sPhraseDelimiter), uHash );
uHash = sphFNV64cont ( pNode->m_dWords[i].m_sWord.cstr(), uHash );
}
return uHash;
}
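// Worked example ( hypothetical keywords ): for a phrase node "red apple"
// the hash above is FNV64 over "red", then the one-byte delimiter, then
// "apple" -- so identical word sequences always map to the same 64-bit key,
// while "red apple" and "redapple" map to different ones.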
static void sphHashSubphrases ( XQNode_t * pNode, BigramHash_t & hBigram )
{
assert ( pNode );
// nothing to hash for single-word nodes
if ( pNode->m_dWords.GetLength()<=1 )
return;
const CSphVector<XQKeyword_t> & dWords = pNode->m_dWords;
int iLen = dWords.GetLength();
for ( int i=0; i<iLen; i++ )
{
uint64_t uSubPhrase = sphFNV64cont ( dWords[i].m_sWord.cstr(), SPH_FNV64_SEED );
sphBigramAddNode ( pNode, uSubPhrase, hBigram );
// skip the whole phrase itself; only proper subphrases get hashed here
int iSubLen = ( i==0 ? iLen-1 : iLen );
for ( int j=i+1; j<iSubLen; j++ )
{
uSubPhrase = sphFNV64 ( g_sPhraseDelimiter, sizeof(g_sPhraseDelimiter), uSubPhrase );
uSubPhrase = sphFNV64cont ( dWords[j].m_sWord.cstr(), uSubPhrase );
sphBigramAddNode ( pNode, uSubPhrase, hBigram );
}
}
// loop all children
ARRAY_FOREACH ( i, pNode->m_dChildren )
sphHashSubphrases ( pNode->m_dChildren[i], hBigram );
}
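// Worked example: for the phrase "A B C" the subphrases hashed above are
// "A", "A B", "B", "B C" and "C" -- every contiguous run except the full
// phrase itself, whose hash is produced by sphHashPhrase() instead.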
static bool sphIsNodeStrongest ( const XQNode_t * pNode, const CSphVector<XQNode_t *> & dSimilar )
{
//
// The cases when query won't be optimized:
// 1. Proximities with different distance: "A B C"~N | "A B C D"~M (N != M)
// 2. Partial intersection in the middle: "A B C D" | "D B C E" (really they won't be found)
// 3. Weaker phrase for proximity. Example: "A B C D"~N | "B C"
//
// The cases when query will be optimized:
// 1. Found weaker term (phrase or proximity type) for sub-query with phrase type.
// Examples:
// "D A B C E" | "A B C" (weaker phrase) => "A B C"
// "A B C D E" | "B C D"~N (weaker proximity) => "B C D"~N
//
// 2. Equal proximities with the different distance.
// Example: "A B C"~N | "A B C"~M => "A B C"~min(M,N)
//
// 3. Found weaker term with proximity type with the same distance.
// Example: "D A B C E"~N | "A B"~N => "A B"~N
//
assert ( pNode );
XQOperator_e eNode = pNode->GetOp();
int iWords = pNode->m_dWords.GetLength();
ARRAY_FOREACH ( i, dSimilar )
{
XQOperator_e eSimilar = dSimilar[i]->GetOp();
int iSimilarWords = dSimilar[i]->m_dWords.GetLength();
if ( eNode==SPH_QUERY_PROXIMITY && eSimilar==SPH_QUERY_PROXIMITY && iWords>iSimilarWords )
return false;
if ( ( eNode==SPH_QUERY_PHRASE || eNode==SPH_QUERY_AND ) && ( eSimilar==SPH_QUERY_PROXIMITY && ( iWords>1 || pNode->m_dChildren.GetLength() ) ) )
return false;
bool bSimilar = ( ( eNode==SPH_QUERY_PHRASE && eSimilar==SPH_QUERY_PHRASE ) ||
( ( eNode==SPH_QUERY_PHRASE || eNode==SPH_QUERY_AND ) && ( eSimilar==SPH_QUERY_PHRASE || eSimilar==SPH_QUERY_PROXIMITY ) ) ||
( eNode==SPH_QUERY_PROXIMITY && ( eSimilar==SPH_QUERY_AND || eSimilar==SPH_QUERY_PHRASE ) ) ||
( eNode==SPH_QUERY_PROXIMITY && eSimilar==SPH_QUERY_PROXIMITY && pNode->m_iOpArg>=dSimilar[i]->m_iOpArg ) );
if ( !bSimilar )
return false;
}
return true;
}
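// For illustration ( letters are placeholder keywords ): in a query like
// ( "A B C D E" | "B C D" ) any document matching the longer phrase also
// matches the shorter one, so the stronger ( longer ) term is redundant and
// the transform below collapses the OR to just "B C D".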
bool CSphTransformation::TransformCommonKeywords ()
{
CSphVector <XQNode_t *> dPendingDel;
for ( auto& tSimSimilar : m_hSimilar )
{
BigramHash_t hBigrams;
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dPhrases = tSimilar.second;
if ( dPhrases.GetLength()<2 )
continue;
ARRAY_FOREACH ( i, dPhrases )
sphHashSubphrases ( dPhrases[i], hBigrams );
ARRAY_FOREACH ( i, dPhrases )
{
XQNode_t * pNode = dPhrases[i];
uint64_t uPhraseHash = sphHashPhrase ( pNode );
CSphVector<XQNode_t *> * ppCommon = hBigrams ( uPhraseHash );
if ( ppCommon && sphIsNodeStrongest ( pNode, *ppCommon ) )
{
ARRAY_FOREACH ( j, (*ppCommon) )
dPendingDel.Add ( (*ppCommon)[j] );
}
}
}
}
bool bTransformed = ( dPendingDel.GetLength()>0 );
dPendingDel.Sort();
// Delete stronger terms
XQNode_t * pLast = NULL;
ARRAY_FOREACH ( i, dPendingDel )
{
// skip dupes
if ( pLast==dPendingDel[i] )
continue;
pLast = dPendingDel[i];
Verify ( pLast->m_pParent->m_dChildren.RemoveValue ( pLast ) );
// plain delete ( not SafeDelete ) since the pointer value is still compared below
delete ( dPendingDel[i] );
}
return bTransformed;
}
// checks whether a phrase node qualifies for the common prefix/suffix ( common phrase ) optimization
bool CSphTransformation::CheckCommonPhrase ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || pNode->m_pParent->GetOp()!=SPH_QUERY_OR || pNode->GetOp()!=SPH_QUERY_PHRASE || pNode->m_dWords.GetLength()<2 )
{
//
// NOLINT // NOT:
// NOLINT // ______________________ OR (parentOr) ___
// NOLINT // / | |
// NOLINT // pNode (PHRASE) ... ...
//
return false;
}
// single-word phrases are not allowed
assert ( pNode->m_dWords.GetLength()>=2 );
// phrases whose words are not strictly consecutive are not allowed
for ( int i=1; i<pNode->m_dWords.GetLength(); i++ )
{
if ( pNode->m_dWords[i].m_iAtomPos-pNode->m_dWords[i-1].m_iAtomPos!=1 )
return false;
}
return true;
}
struct CommonInfo_t
{
CSphVector<XQNode_t *> * m_pPhrases;
int m_iCommonLen;
bool m_bHead;
bool m_bHasBetter;
};
struct Node2Common_t
{
XQNode_t * m_pNode;
CommonInfo_t * m_pCommon;
};
struct CommonDupElimination_fn
{
bool IsLess ( const Node2Common_t & a, const Node2Common_t & b ) const
{
if ( a.m_pNode!=b.m_pNode )
return a.m_pNode<b.m_pNode;
if ( a.m_pCommon->m_iCommonLen!=b.m_pCommon->m_iCommonLen )
return a.m_pCommon->m_iCommonLen>b.m_pCommon->m_iCommonLen;
return a.m_pCommon->m_bHead;
}
};
struct XQNodeAtomPos_fn
{
bool IsLess ( const XQNode_t * a, const XQNode_t * b ) const
{
return a->m_iAtomPos<b->m_iAtomPos;
}
};
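// For illustration ( hypothetical keywords ): ( "X A B" | "Y A B" ) shares
// the two-word tail "A B", so the transform below rewrites the pair as the
// single phrase ( ( X | Y ) "A B" ); a shared head is handled symmetrically.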
bool CSphTransformation::TransformCommonPhrase ()
{
bool bRecollect = false;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dNodes = tSimilar.second;
if ( dNodes.GetLength()<2 )
continue;
bool bHasCommonPhrases = false;
BigramHash_t tBigramHead;
BigramHash_t tBigramTail;
// 1st step: hash only the first two and the last two words of each phrase
ARRAY_FOREACH ( iPhrase, dNodes )
{
const CSphVector<XQKeyword_t> & dWords = dNodes[iPhrase]->m_dWords;
assert ( dWords.GetLength()>=2 );
dNodes[iPhrase]->m_iAtomPos = dWords.Begin()->m_iAtomPos;
uint64_t uHead = sphFNV64cont ( dWords[0].m_sWord.cstr(), SPH_FNV64_SEED );
uint64_t uTail = sphFNV64cont ( dWords [ dWords.GetLength() - 1 ].m_sWord.cstr(), SPH_FNV64_SEED );
uHead = sphFNV64 ( g_sPhraseDelimiter, sizeof(g_sPhraseDelimiter), uHead );
uHead = sphFNV64cont ( dWords[1].m_sWord.cstr(), uHead );
uTail = sphFNV64 ( g_sPhraseDelimiter, sizeof(g_sPhraseDelimiter), uTail );
uTail = sphFNV64cont ( dWords[dWords.GetLength()-2].m_sWord.cstr(), uTail );
int iHeadLen = sphBigramAddNode ( dNodes[iPhrase], uHead, tBigramHead );
int iTailLen = sphBigramAddNode ( dNodes[iPhrase], uTail, tBigramTail );
bHasCommonPhrases |= ( iHeadLen>1 || iTailLen>1 );
}
if ( !bHasCommonPhrases )
continue;
// 2nd step: find the longest shared run for each group of phrases
CSphVector<CommonInfo_t> dCommon;
for ( auto& tBigram : tBigramHead )
{
// only phrases that share same words at head
if ( tBigram.second.GetLength()<2 )
continue;
CommonInfo_t & tElem = dCommon.Add();
tElem.m_pPhrases = &tBigram.second;
tElem.m_iCommonLen = 2;
tElem.m_bHead = true;
tElem.m_bHasBetter = false;
}
for ( auto& tBigram : tBigramTail )
{
// only phrases that share same words at tail
if ( tBigram.second.GetLength()<2 )
continue;
CommonInfo_t & tElem = dCommon.Add();
tElem.m_pPhrases = &tBigram.second;
tElem.m_iCommonLen = 2;
tElem.m_bHead = false;
tElem.m_bHasBetter = false;
}
// for each set of phrases sharing words at the head or tail,
// every further word that matches across all the phrases extends the common length
ARRAY_FOREACH ( i, dCommon )
{
CommonInfo_t & tCommon = dCommon[i];
bool bHead = tCommon.m_bHead;
const CSphVector<XQNode_t *> & dPhrases = *tCommon.m_pPhrases;
// start from third word ( two words at each phrase already matched at bigram hashing )
for ( int iCount=3; ; iCount++ )
{
// stop once the shortest phrase runs out of words
if ( iCount>=dPhrases[0]->m_dWords.GetLength() )
break;
int iWordRef = ( bHead ? iCount-1 : dPhrases[0]->m_dWords.GetLength() - iCount );
uint64_t uHash = sphFNV64 ( dPhrases[0]->m_dWords[iWordRef].m_sWord.cstr() );
bool bPhrasesMatch = false;
bool bSomePhraseOver = false;
for ( int iPhrase=1; iPhrase<dPhrases.GetLength(); iPhrase++ )
{
bSomePhraseOver = ( iCount>=dPhrases[iPhrase]->m_dWords.GetLength() );
if ( bSomePhraseOver )
break;
int iWord = ( bHead ? iCount-1 : dPhrases[iPhrase]->m_dWords.GetLength() - iCount );
bPhrasesMatch = ( uHash==sphFNV64 ( dPhrases[iPhrase]->m_dWords[iWord].m_sWord.cstr() ) );
if ( !bPhrasesMatch )
break;
}
// no need to check further once any phrase runs out of words or the matching sequence ends
if ( bSomePhraseOver || !bPhrasesMatch )
break;
tCommon.m_iCommonLen = iCount;
}
}
// mark all dupes ( groups with the smaller common length ) as having a better alternative
if ( dCommon.GetLength()>=2 )
{
CSphVector<Node2Common_t> dDups ( dCommon.GetLength()*2 );
dDups.Resize ( 0 );
ARRAY_FOREACH ( i, dCommon )
{
CommonInfo_t & tCommon = dCommon[i];
CSphVector<XQNode_t *> & dPhrases = *tCommon.m_pPhrases;
ARRAY_FOREACH ( j, dPhrases )
{
Node2Common_t & tDup = dDups.Add();
tDup.m_pNode = dPhrases[j];
tDup.m_pCommon = &tCommon;
}
}
dDups.Sort ( CommonDupElimination_fn() );
for ( int i=0; i<dDups.GetLength()-1; i++ )
{
Node2Common_t & tCurr = dDups[i];
Node2Common_t & tNext = dDups[i+1];
if ( tCurr.m_pNode==tNext.m_pNode )
{
if ( tCurr.m_pCommon->m_iCommonLen<=tNext.m_pCommon->m_iCommonLen )
tCurr.m_pCommon->m_bHasBetter = true;
else
tNext.m_pCommon->m_bHasBetter = true;
}
}
}
ARRAY_FOREACH ( i, dCommon )
{
const CommonInfo_t & tElem = dCommon[i];
if ( !tElem.m_bHasBetter )
{
tElem.m_pPhrases->Sort ( XQNodeAtomPos_fn() );
MakeTransformCommonPhrase ( *tElem.m_pPhrases, tElem.m_iCommonLen, tElem.m_bHead );
bRecollect = true;
}
}
}
}
return bRecollect;
}
void CSphTransformation::MakeTransformCommonPhrase ( CSphVector<XQNode_t *> & dCommonNodes, int iCommonLen, bool bHeadIsCommon )
{
XQNode_t * pCommonPhrase = new XQNode_t ( XQLimitSpec_t() );
pCommonPhrase->SetOp ( SPH_QUERY_PHRASE );
XQNode_t * pGrandOr = dCommonNodes[0]->m_pParent;
if ( bHeadIsCommon )
{
// fill up the common prefix ( head )
XQNode_t * pPhrase = dCommonNodes[0];
pCommonPhrase->m_iAtomPos = pPhrase->m_dWords[0].m_iAtomPos;
for ( int i=0; i<iCommonLen; i++ )
pCommonPhrase->m_dWords.Add ( pPhrase->m_dWords[i] );
} else
{
XQNode_t * pPhrase = dCommonNodes[0];
// set the farthest atom position
int iAtomPos = pPhrase->m_dWords [ pPhrase->m_dWords.GetLength() - iCommonLen ].m_iAtomPos;
for ( int i=1; i<dCommonNodes.GetLength(); i++ )
{
XQNode_t * pCur = dCommonNodes[i];
int iCurAtomPos = pCur->m_dWords[pCur->m_dWords.GetLength() - iCommonLen].m_iAtomPos;
if ( iAtomPos < iCurAtomPos )
{
pPhrase = pCur;
iAtomPos = iCurAtomPos;
}
}
pCommonPhrase->m_iAtomPos = iAtomPos;
for ( int i=pPhrase->m_dWords.GetLength() - iCommonLen; i<pPhrase->m_dWords.GetLength(); i++ )
pCommonPhrase->m_dWords.Add ( pPhrase->m_dWords[i] );
}
XQNode_t * pNewOr = new XQNode_t ( XQLimitSpec_t() );
pNewOr->SetOp ( SPH_QUERY_OR );
ARRAY_FOREACH ( i, dCommonNodes )
{
XQNode_t * pPhrase = dCommonNodes[i];
// unlink phrase from its parent; drop it entirely if it consists of the common words only
Verify ( pGrandOr->m_dChildren.RemoveValue ( pPhrase ) );
if ( pPhrase->m_dWords.GetLength()==iCommonLen )
{
SafeDelete ( pPhrase );
continue;
}
// move phrase to new OR
pNewOr->m_dChildren.Add ( pPhrase );
pPhrase->m_pParent = pNewOr;
// shift the remaining words down and renumber their atom positions
if ( bHeadIsCommon )
{
int iEndCommonAtom = pCommonPhrase->m_dWords.Last().m_iAtomPos+1;
for ( int j=iCommonLen; j<pPhrase->m_dWords.GetLength(); j++ )
{
int iTo = j-iCommonLen;
pPhrase->m_dWords[iTo] = pPhrase->m_dWords[j];
pPhrase->m_dWords[iTo].m_iAtomPos = iEndCommonAtom + iTo;
}
}
pPhrase->m_dWords.Resize ( pPhrase->m_dWords.GetLength() - iCommonLen );
if ( !bHeadIsCommon )
{
int iStartAtom = pCommonPhrase->m_dWords[0].m_iAtomPos - pPhrase->m_dWords.GetLength();
ARRAY_FOREACH ( j, pPhrase->m_dWords )
pPhrase->m_dWords[j].m_iAtomPos = iStartAtom + j;
}
if ( pPhrase->m_dWords.GetLength()==1 )
pPhrase->SetOp ( SPH_QUERY_AND );
}
if ( pNewOr->m_dChildren.GetLength() )
{
// the parent phrase needs valid atom positions from its children
pNewOr->m_iAtomPos = pNewOr->m_dChildren[0]->m_dWords[0].m_iAtomPos;
XQNode_t * pNewPhrase = new XQNode_t ( XQLimitSpec_t() );
if ( bHeadIsCommon )
pNewPhrase->SetOp ( SPH_QUERY_PHRASE, pCommonPhrase, pNewOr );
else
pNewPhrase->SetOp ( SPH_QUERY_PHRASE, pNewOr, pCommonPhrase );
pGrandOr->m_dChildren.Add ( pNewPhrase );
pNewPhrase->m_pParent = pGrandOr;
} else
{
// every phrase consisted of the common words only; keep just the common phrase
pGrandOr->m_dChildren.Add ( pCommonPhrase );
pCommonPhrase->m_pParent = pGrandOr;
SafeDelete ( pNewOr );
}
}
bool CSphTransformation::CheckCommonAndNotFactor ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || !pNode->m_pParent->m_pParent || !pNode->m_pParent->m_pParent->m_pParent ||
pNode->m_pParent->GetOp()!=SPH_QUERY_AND || pNode->m_pParent->m_pParent->GetOp()!=SPH_QUERY_ANDNOT ||
pNode->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_OR ||
// FIXME!!! check performance with an OR node at the 2nd grand instead of a regular NOT
pNode->m_pParent->m_pParent->m_dChildren.GetLength()<2 || pNode->m_pParent->m_pParent->m_dChildren[1]->GetOp()!=SPH_QUERY_NOT )
{
//
// NOLINT // NOT:
// NOLINT // _______ OR (gGOr) ________________
// NOLINT // / | |
// NOLINT // ... AND NOT (grandAndNot) ...
// NOLINT // / |
// NOLINT // AND NOT
// NOLINT // | |
// NOLINT // pNode relatedNode
//
return false;
}
return true;
}
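// For illustration ( A, N1, N2 are placeholder sub-expressions ):
// ( A AND NOT N1 ) | ( A AND NOT N2 ) is logically A AND NOT ( N1 AND N2 ),
// so the transform below merges the NOT operands of the matching AND NOT
// branches under a single ( weakest ) copy of A.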
bool CSphTransformation::TransformCommonAndNotFactor ()
{
bool bRecollect = false;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dSimilarNodes = tSimilar.second;
if ( dSimilarNodes.GetLength()<2 )
continue;
if ( MakeTransformCommonAndNotFactor ( dSimilarNodes ) )
bRecollect = true;
}
}
return bRecollect;
}
bool CSphTransformation::MakeTransformCommonAndNotFactor ( CSphVector<XQNode_t *> & dSimilarNodes )
{
// Pick the weakest node among the equal ones.
// PROXIMITY and PHRASE nodes with the same keywords have an equal magic hash,
// so they are considered equal nodes.
int iWeakestIndex = GetWeakestIndex ( dSimilarNodes );
XQNode_t * pFirstAndNot = dSimilarNodes [iWeakestIndex]->m_pParent->m_pParent;
XQNode_t * pCommonOr = pFirstAndNot->m_pParent;
assert ( pFirstAndNot->m_dChildren.GetLength()==2 );
XQNode_t * pFirstNot = pFirstAndNot->m_dChildren[1];
assert ( pFirstNot->m_dChildren.GetLength()==1 );
XQNode_t * pAndNew = new XQNode_t ( XQLimitSpec_t() );
pAndNew->SetOp ( SPH_QUERY_AND );
pAndNew->m_dChildren.Reserve ( dSimilarNodes.GetLength() );
pAndNew->m_dChildren.Add ( pFirstNot->m_dChildren[0] );
pAndNew->m_dChildren.Last()->m_pParent = pAndNew;
pFirstNot->m_dChildren[0] = pAndNew;
pAndNew->m_pParent = pFirstNot;
for ( int i=0; i<dSimilarNodes.GetLength(); ++i )
{
assert ( CheckCommonAndNotFactor ( dSimilarNodes[i] ) );
if ( i==iWeakestIndex )
continue;
XQNode_t * pAndNot = dSimilarNodes[i]->m_pParent->m_pParent;
assert ( pAndNot->m_dChildren.GetLength()==2 );
XQNode_t * pNot = pAndNot->m_dChildren[1];
assert ( pNot->m_dChildren.GetLength()==1 );
assert ( &pAndNew->m_dChildren!=&pNot->m_dChildren );
pAndNew->m_dChildren.Add ( pNot->m_dChildren[0] );
pAndNew->m_dChildren.Last()->m_pParent = pAndNew;
pNot->m_dChildren[0] = NULL;
Verify ( pCommonOr->m_dChildren.RemoveValue ( pAndNot ) );
dSimilarNodes[i] = NULL;
SafeDelete ( pAndNot );
}
return true;
}
bool CSphTransformation::CheckCommonOrNot ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || !pNode->m_pParent->m_pParent || !pNode->m_pParent->m_pParent->m_pParent ||
!pNode->m_pParent->m_pParent->m_pParent->m_pParent || pNode->m_pParent->GetOp()!=SPH_QUERY_OR ||
pNode->m_pParent->m_pParent->GetOp()!=SPH_QUERY_NOT ||
pNode->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_ANDNOT ||
pNode->m_pParent->m_pParent->m_pParent->m_pParent->GetOp()!=SPH_QUERY_OR )
{
//
// NOLINT // NOT:
// NOLINT // __ OR (Grand3 = CommonOr) __
// NOLINT // / | |
// NOLINT // ... AND NOT (Grand2) ...
// NOLINT // / |
// NOLINT // relatedNode NOT (grandNot)
// NOLINT // |
// NOLINT // OR (parentOr)
// NOLINT // / |
// NOLINT // pNode ...
//
return false;
}
return true;
}
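// For illustration ( letters are placeholder sub-expressions ):
// ( A AND NOT ( N | X ) ) | ( B AND NOT ( N | Y ) ) is logically
// ( ( A AND NOT X ) | ( B AND NOT Y ) ) AND NOT N, so the shared N is
// hoisted out of both NOT branches into one top-level AND NOT.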
bool CSphTransformation::TransformCommonOrNot ()
{
bool bRecollect = false;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
CSphVector<XQNode_t *> & dSimilarNodes = tSimilar.second;
if ( dSimilarNodes.GetLength()<2 )
continue;
if ( CollectRelatedNodes < GrandNode, Grand2Node > ( dSimilarNodes ) && MakeTransformCommonOrNot ( dSimilarNodes ) )
{
bRecollect = true;
// Don't make transformation for other nodes from the same OR-node,
// because query tree was changed and further transformations
// might be invalid.
break;
}
}
}
return bRecollect;
}
bool CSphTransformation::MakeTransformCommonOrNot ( CSphVector<XQNode_t *> & dSimilarNodes )
{
// Pick the weakest node among the equal ones.
// PROXIMITY and PHRASE nodes with the same keywords have an equal magic hash,
// so they are considered equal nodes.
int iWeakestIndex = GetWeakestIndex ( dSimilarNodes );
assert ( iWeakestIndex!=-1 );
XQNode_t * pWeakestSimilar = dSimilarNodes [ iWeakestIndex ];
// Common OR node (that is Grand3Node::From)
XQNode_t * pCommonOr = pWeakestSimilar->m_pParent->m_pParent->m_pParent->m_pParent;
// Delete/unlink similar nodes ( except weakest )
ARRAY_FOREACH ( i, dSimilarNodes )
{
XQNode_t * pParent = dSimilarNodes[i]->m_pParent;
Verify ( pParent->m_dChildren.RemoveValue ( dSimilarNodes[i] ) );
if ( i!=iWeakestIndex )
SafeDelete ( dSimilarNodes[i] );
}
XQNode_t * pNewAndNot = new XQNode_t ( XQLimitSpec_t() );
XQNode_t * pNewAnd = new XQNode_t ( XQLimitSpec_t() );
XQNode_t * pNewNot = new XQNode_t ( XQLimitSpec_t() );
if ( !pCommonOr->m_pParent )
{
*m_ppRoot = pNewAndNot;
} else
{
pNewAndNot->m_pParent = pCommonOr->m_pParent;
assert ( pCommonOr->m_pParent->m_dChildren.Contains ( pCommonOr ) );
ARRAY_FOREACH ( i, pCommonOr->m_pParent->m_dChildren )
{
if ( pCommonOr->m_pParent->m_dChildren[i]==pCommonOr )
pCommonOr->m_pParent->m_dChildren[i] = pNewAndNot;
}
}
pNewAnd->SetOp ( SPH_QUERY_AND, pCommonOr );
pNewNot->SetOp ( SPH_QUERY_NOT, pWeakestSimilar );
pNewAndNot->SetOp ( SPH_QUERY_ANDNOT, pNewAnd, pNewNot );
return true;
}
bool CSphTransformation::CheckHungOperand ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent ||
( pNode->m_pParent->GetOp()!=SPH_QUERY_OR && pNode->m_pParent->GetOp()!=SPH_QUERY_AND ) ||
( pNode->m_pParent->m_pParent && pNode->m_pParent->GetOp()==SPH_QUERY_AND &&
pNode->m_pParent->m_pParent->GetOp()==SPH_QUERY_ANDNOT ) ||
pNode->m_pParent->m_dChildren.GetLength()>1 || pNode->m_dWords.GetLength() )
{
//
// NOLINT // NOT:
// NOLINT // OR|AND (parent)
// NOLINT // |
// NOLINT // pNode\?
//
return false;
}
return true;
}
bool CSphTransformation::TransformHungOperand ()
{
if ( !m_hSimilar.GetLength() || !m_hSimilar.Exists ( CONST_GROUP_FACTOR ) || !m_hSimilar[CONST_GROUP_FACTOR].Exists ( CONST_GROUP_FACTOR ) )
return false;
CSphVector<XQNode_t *> & dSimilarNodes = m_hSimilar[CONST_GROUP_FACTOR][CONST_GROUP_FACTOR];
ARRAY_FOREACH ( i, dSimilarNodes )
{
XQNode_t * pHungNode = dSimilarNodes[i];
XQNode_t * pParent = pHungNode->m_pParent;
XQNode_t * pGrand = pParent->m_pParent;
if ( !pGrand )
{
*m_ppRoot = pHungNode;
pHungNode->m_pParent = NULL;
} else
{
assert ( pGrand->m_dChildren.Contains ( pParent ) );
ARRAY_FOREACH ( j, pGrand->m_dChildren )
{
if ( pGrand->m_dChildren[j]!=pParent )
continue;
pGrand->m_dChildren[j] = pHungNode;
pHungNode->m_pParent = pGrand;
break;
}
}
pParent->m_dChildren[0] = NULL;
SafeDelete ( pParent );
}
return true;
}
bool CSphTransformation::CheckExcessBrackets ( const XQNode_t * pNode )
{
if ( !pNode || !pNode->m_pParent || !pNode->m_pParent->m_pParent ||
!( ( pNode->m_pParent->GetOp()==SPH_QUERY_AND && !pNode->m_pParent->m_dWords.GetLength() &&
pNode->m_pParent->m_pParent->GetOp()==SPH_QUERY_AND ) ||
( pNode->m_pParent->GetOp()==SPH_QUERY_OR && pNode->m_pParent->m_pParent->GetOp()==SPH_QUERY_OR ) ) )
{
//
// NOLINT // NOT:
// NOLINT // OR|AND (grand)
// NOLINT // / |
// NOLINT // OR|AND (parent) ...
// NOLINT // |
// NOLINT // pNode
//
return false;
}
return true;
}
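// For illustration: nested operators of the same type, as in
// A AND ( B AND C ), add brackets but no semantics; sphMoveSiblingsUp()
// below splices the inner node's children into its grandparent, giving
// the flat A AND B AND C.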
static XQNode_t * sphMoveSiblingsUp ( XQNode_t * pNode )
{
XQNode_t * pParent = pNode->m_pParent;
assert ( pParent );
XQNode_t * pGrand = pParent->m_pParent;
assert ( pGrand );
assert ( pGrand->m_dChildren.Contains ( pParent ) );
int iParent = GetNodeChildIndex ( pGrand, pParent );
int iParentChildren = pParent->m_dChildren.GetLength();
int iGrandChildren = pGrand->m_dChildren.GetLength();
int iTotalChildren = iParentChildren+iGrandChildren-1;
// parent.children + grand.parent.children - parent itself
CSphVector<XQNode_t *> dChildren ( iTotalChildren );
// grand head prior parent
for ( int i=0; i<iParent; i++ )
dChildren[i] = pGrand->m_dChildren[i];
// grand tail after parent
for ( int i=0; i<iGrandChildren-iParent-1; i++ )
dChildren[i+iParent+iParentChildren] = pGrand->m_dChildren[i+iParent+1];
// all parent children
for ( int i=0; i<iParentChildren; i++ )
{
XQNode_t * pChild = pParent->m_dChildren[i];
pChild->m_pParent = pGrand;
dChildren[i+iParent] = pChild;
}
pGrand->m_dChildren.SwapData ( dChildren );
// all children at grand now
pParent->m_dChildren.Resize(0);
// return the just-deleted parent so the caller can mark it and skip it
// if it also appears among the remaining transformation candidates
XQNode_t * pDeleted = pParent;
delete ( pParent );
return pDeleted;
}
struct XQNodeHash_fn
{
static inline uint64_t Hash ( XQNode_t * pNode ) { return (uint64_t)pNode; }
};
bool CSphTransformation::TransformExcessBrackets ()
{
bool bRecollect = false;
CSphOrderedHash<int, XQNode_t *, XQNodeHash_fn, 64> hDeleted;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
for ( XQNode_t* pNode : tSimilar.second )
{
// the node's environment might have changed due to prior node transformations
if ( !hDeleted.Exists ( pNode ) && CheckExcessBrackets ( pNode ) )
{
XQNode_t * pDel = sphMoveSiblingsUp ( pNode );
hDeleted.Add ( 1, pDel );
bRecollect = true;
}
}
}
}
return bRecollect;
}
bool CSphTransformation::CheckExcessAndNot ( const XQNode_t * pNode )
{
if ( !pNode || !ParentNode::From ( pNode ) || !GrandNode::From ( pNode ) || !Grand2Node::From ( pNode ) || pNode->GetOp()!=SPH_QUERY_AND ||
( pNode->m_dChildren.GetLength()==1 && pNode->m_dChildren[0]->GetOp()==SPH_QUERY_ANDNOT ) ||
ParentNode::From ( pNode )->GetOp()!=SPH_QUERY_ANDNOT || GrandNode::From ( pNode )->GetOp()!=SPH_QUERY_AND ||
Grand2Node::From ( pNode )->GetOp()!=SPH_QUERY_ANDNOT ||
// FIXME!!! check performance with an OR node at the 2nd grand instead of a regular NOT
Grand2Node::From ( pNode )->m_dChildren.GetLength()<2 || Grand2Node::From ( pNode )->m_dChildren[1]->GetOp()!=SPH_QUERY_NOT )
{
//
// NOLINT // NOT:
// NOLINT // AND NOT
// NOLINT // / |
// NOLINT // AND NOT
// NOLINT // |
// NOLINT // AND NOT
// NOLINT // / |
// NOLINT // AND(pNode) NOT
// NOLINT // | |
// NOLINT // .. ...
//
return false;
}
return true;
}
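// For illustration ( placeholders ): ( ( X AND NOT N1 ) AND NOT N2 ) is
// logically X AND NOT ( N1 | N2 ), so the transform below merges the inner
// NOT operand into the outer one and removes the inner AND NOT level.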
bool CSphTransformation::TransformExcessAndNot ()
{
bool bRecollect = false;
CSphOrderedHash<int, XQNode_t *, XQNodeHash_fn, 64> hDeleted;
for ( auto& tSimSimilar : m_hSimilar )
{
for ( auto& tSimilar : tSimSimilar.second )
{
// Nodes with the same iFuzzyHash
for ( XQNode_t* pAnd : tSimilar.second )
{
XQNode_t * pParentAndNot = pAnd->m_pParent;
// the node's environment might have changed due to prior node transformations
if ( hDeleted.Exists ( pParentAndNot ) || !CheckExcessAndNot ( pAnd ) )
continue;
assert ( pParentAndNot->m_dChildren.GetLength()==2 );
XQNode_t * pNot = pParentAndNot->m_dChildren[1];
XQNode_t * pGrandAnd = pParentAndNot->m_pParent;
XQNode_t * pGrand2AndNot = pGrandAnd->m_pParent;
assert ( pGrand2AndNot->m_dChildren.GetLength()==2 );
XQNode_t * pGrand2Not = pGrand2AndNot->m_dChildren[1];
assert ( pGrand2Not->m_dChildren.GetLength()==1 );
auto * pNewOr = new XQNode_t ( XQLimitSpec_t() );
pNewOr->SetOp ( SPH_QUERY_OR, pNot->m_dChildren );
pNewOr->m_dChildren.Add ( pGrand2Not->m_dChildren[0] );
pNewOr->m_dChildren.Last()->m_pParent = pNewOr;
pGrand2Not->m_dChildren[0] = pNewOr;
pNewOr->m_pParent = pGrand2Not;
assert ( pGrandAnd->m_dChildren.Contains ( pParentAndNot ) );
int iChild = GetNodeChildIndex ( pGrandAnd, pParentAndNot );
if ( iChild >=0 )
pGrandAnd->m_dChildren[iChild] = pAnd;
pAnd->m_pParent = pGrandAnd;
// Delete excess nodes
hDeleted.Add ( 1, pParentAndNot );
pNot->m_dChildren.Resize ( 0 );
pParentAndNot->m_dChildren[0] = NULL;
SafeDelete ( pParentAndNot );
bRecollect = true;
}
}
}
return bRecollect;
}
void CSphTransformation::Dump ()
{
#ifdef XQ_DUMP_TRANSFORMED_TREE
m_hSimilar.IterateStart();
while ( m_hSimilar.IterateNext() )
{
printf ( "\nnode: hash 0x" UINT64_FMT "\n", m_hSimilar.IterateGetKey() );
m_hSimilar.IterateGet().IterateStart();
while ( m_hSimilar.IterateGet().IterateNext() )
{
CSphVector<XQNode_t *> & dNodes = m_hSimilar.IterateGet().IterateGet();
printf ( "\tgrand: hash 0x" UINT64_FMT ", children %d\n", m_hSimilar.IterateGet().IterateGetKey(), dNodes.GetLength() );
printf ( "\tparents:\n" );
ARRAY_FOREACH ( i, dNodes )
{
uint64_t uParentHash = dNodes[i]->GetHash();
printf ( "\t\thash 0x" UINT64_FMT "\n", uParentHash );
}
}
}
#endif
}
#ifdef XQDEBUG
void CSphTransformation::Dump ( const XQNode_t * pNode, const char * sHeader )
{
printf ( "%s", sHeader );
if ( pNode )
{
printf ( "%s\n", sphReconstructNode ( pNode, NULL ).cstr(), NULL );
#ifdef XQ_DUMP_TRANSFORMED_TREE
xqDump ( pNode, 0 );
#endif
}
}
#else
void CSphTransformation::Dump ( const XQNode_t * , const char * )
{}
#endif
void CSphTransformation::Transform ()
{
if ( CollectInfo <ParentNode, NullNode> ( *m_ppRoot, &CheckCommonKeywords ) )
{
bool bDump = TransformCommonKeywords ();
if ( bDump )
Dump ( *m_ppRoot, "\nAfter transformation of 'COMMON KEYWORDS'\n" );
}
if ( CollectInfo <ParentNode, NullNode> ( *m_ppRoot, &CheckCommonPhrase ) )
{
bool bDump = TransformCommonPhrase ();
if ( bDump )
Dump ( *m_ppRoot, "\nAfter transformation of 'COMMON PHRASES'\n" );
}
bool bRecollect = false;
do
{
bRecollect = false;
if ( CollectInfo <Grand2Node, CurrentNode> ( *m_ppRoot, &CheckCommonNot ) )
{
bool bDump = TransformCommonNot ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'COMMON NOT'\n" );
}
if ( CollectInfo <Grand3Node, CurrentNode> ( *m_ppRoot, &CheckCommonCompoundNot ) )
{
bool bDump = TransformCommonCompoundNot ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'COMMON COMPOUND NOT'\n" );
}
if ( CollectInfo <Grand2Node, CurrentNode> ( *m_ppRoot, &CheckCommonSubTerm ) )
{
bool bDump = TransformCommonSubTerm ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'COMMON SUBTERM'\n" );
}
if ( CollectInfo <Grand2Node, CurrentNode> ( *m_ppRoot, &CheckCommonAndNotFactor ) )
{
bool bDump = TransformCommonAndNotFactor ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'COMMON ANDNOT FACTOR'\n" );
}
if ( CollectInfo <Grand3Node, CurrentNode> ( *m_ppRoot, &CheckCommonOrNot ) )
{
bool bDump = TransformCommonOrNot ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'COMMON OR NOT'\n" );
}
if ( CollectInfo <NullNode, NullNode> ( *m_ppRoot, &CheckHungOperand ) )
{
bool bDump = TransformHungOperand ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'HUNG OPERAND'\n" );
}
if ( CollectInfo <NullNode, NullNode> ( *m_ppRoot, &CheckExcessBrackets ) )
{
bool bDump = TransformExcessBrackets ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'EXCESS BRACKETS'\n" );
}
if ( CollectInfo <ParentNode, CurrentNode> ( *m_ppRoot, &CheckExcessAndNot ) )
{
bool bDump = TransformExcessAndNot ();
bRecollect |= bDump;
Dump ( bDump ? *m_ppRoot : NULL, "\nAfter transformation of 'EXCESS AND NOT'\n" );
}
} while ( bRecollect );
( *m_ppRoot )->Check ( true );
}
void sphOptimizeBoolean ( XQNode_t ** ppRoot, const ISphKeywordsStat * pKeywords )
{
#ifdef XQDEBUG
int64_t tmDelta = sphMicroTimer();
#endif
CSphTransformation qInfo ( ppRoot, pKeywords );
qInfo.Transform ();
#ifdef XQDEBUG
tmDelta = sphMicroTimer() - tmDelta;
if ( tmDelta>10 )
printf ( "optimized boolean in %d.%03d msec", (int)(tmDelta/1000), (int)(tmDelta%1000) );
#endif
}
TokenizerRefPtr_c sphCloneAndSetupQueryTokenizer ( const TokenizerRefPtr_c& pTokenizer, bool bWildcards, bool bExact, bool bJson )
{
assert ( pTokenizer );
if ( bWildcards )
{
if ( bExact )
{
if ( bJson )
return pTokenizer->Clone ( SPH_CLONE_QUERY_WILD_EXACT_JSON );
return pTokenizer->Clone ( SPH_CLONE_QUERY_WILD_EXACT );
}
if ( bJson )
return pTokenizer->Clone ( SPH_CLONE_QUERY_WILD_JSON );
return pTokenizer->Clone ( SPH_CLONE_QUERY_WILD );
}
if ( bExact )
{
if ( bJson )
return pTokenizer->Clone ( SPH_CLONE_QUERY_EXACT_JSON );
return pTokenizer->Clone ( SPH_CLONE_QUERY_EXACT );
}
if ( bJson )
return pTokenizer->Clone ( SPH_CLONE_QUERY_JSON );
return pTokenizer->Clone ( SPH_CLONE_QUERY );
}
//////////////////////////////////////////////////////////////////////////
class QueryParserPlain_c : public QueryParser_i
{
public:
bool IsFullscan ( const XQQuery_t & tQuery ) const override { return false; }
bool ParseQuery ( XQQuery_t & tParsed, const char * sQuery, const CSphQuery * pQuery, TokenizerRefPtr_c pQueryTokenizer, TokenizerRefPtr_c pQueryTokenizerJson, const CSphSchema * pSchema, const DictRefPtr_c& pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields ) const override;
QueryParser_i * Clone() const final { return new QueryParserPlain_c; }
};
bool QueryParserPlain_c::ParseQuery ( XQQuery_t & tParsed, const char * sQuery, const CSphQuery * pQuery, TokenizerRefPtr_c pQueryTokenizer, TokenizerRefPtr_c, const CSphSchema * pSchema, const DictRefPtr_c& pDict, const CSphIndexSettings & tSettings, const CSphBitvec * pMorphFields ) const
{
return sphParseExtendedQuery ( tParsed, sQuery, pQuery, pQueryTokenizer, pSchema, pDict, tSettings, pMorphFields );
}
std::unique_ptr<QueryParser_i> sphCreatePlainQueryParser()
{
return std::make_unique<QueryParserPlain_c>();
}
int GetExpansionLimit ( int iQueryLimit, int iIndexLimit )
{
return ( iQueryLimit!=DEFAULT_QUERY_EXPANSION_LIMIT ? iQueryLimit : iIndexLimit );
}

//////////////////////////////////////////////////////////////////////////
// file: manticoresoftware_manticoresearch/src/timeout_queue.cpp
//////////////////////////////////////////////////////////////////////////

//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "timeout_queue.h"
#include "std/stringbuilder.h"
#include "std/format.h"
inline static bool operator<( const EnqueuedTimeout_t& dLeft, const EnqueuedTimeout_t& dRight )
{
return dLeft.m_iTimeoutTimeUS < dRight.m_iTimeoutTimeUS;
}
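// The queue below is a binary min-heap kept in a flat array: the entry at
// index i has its parent at ( i - 1 ) / 2 and children at 2i + 1 and 2i + 2.
// Worked example: index 4 has parent ( 4 - 1 ) / 2 = 1 and children 9 and 10.
// The entry with the nearest timeout therefore always sits at index 0.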
void TimeoutQueue_c::ShiftUp ( int iHole )
{
if ( m_dQueue.IsEmpty() )
return;
int iParent = ( iHole - 1 ) / 2;
// shift up if needed, so that the worst ( lesser ) ones float to the top
while ( iHole && *m_dQueue[iHole] < *m_dQueue[iParent] )
{
std::swap ( m_dQueue[iHole], m_dQueue[iParent] );
m_dQueue[iHole]->m_iTimeoutIdx = iHole;
iHole = iParent;
iParent = ( iHole - 1 ) / 2;
}
m_dQueue[iHole]->m_iTimeoutIdx = iHole;
}
void TimeoutQueue_c::ShiftDown ( int iHole )
{
if ( m_dQueue.IsEmpty() || iHole == m_dQueue.GetLength() )
return;
auto iMinChild = iHole * 2 + 1;
auto iUsed = m_dQueue.GetLength();
while ( iMinChild < iUsed )
{
// select smallest child
if ( iMinChild + 1 < iUsed && *m_dQueue[iMinChild + 1] < *m_dQueue[iMinChild] )
++iMinChild;
// if the entry is already less than its smallest child, we're done
if ( *m_dQueue[iHole] < *m_dQueue[iMinChild] )
break;
std::swap ( m_dQueue[iHole], m_dQueue[iMinChild] );
m_dQueue[iHole]->m_iTimeoutIdx = iHole;
iHole = iMinChild;
iMinChild = iHole * 2 + 1;
}
m_dQueue[iHole]->m_iTimeoutIdx = iHole;
}
void TimeoutQueue_c::Push ( EnqueuedTimeout_t* pTask )
{
m_dQueue.Add ( pTask );
ShiftUp ( m_dQueue.GetLength() - 1 );
}
void TimeoutQueue_c::Pop()
{
if ( m_dQueue.IsEmpty() )
return;
m_dQueue[0]->m_iTimeoutIdx = -1;
m_dQueue.RemoveFast ( 0 );
ShiftDown ( 0 );
}
void TimeoutQueue_c::Change ( EnqueuedTimeout_t* pTask )
{
if ( !pTask )
return;
auto iHole = pTask->m_iTimeoutIdx;
if ( iHole < 0 )
{
Push ( pTask );
return;
}
if ( iHole && *m_dQueue[iHole] < *m_dQueue[( iHole - 1 ) / 2] )
ShiftUp ( iHole );
else
ShiftDown ( iHole );
}
void TimeoutQueue_c::Remove ( EnqueuedTimeout_t* pTask )
{
if ( !pTask )
return;
auto iHole = pTask->m_iTimeoutIdx;
if ( iHole < 0 || iHole >= m_dQueue.GetLength() )
return;
m_dQueue.RemoveFast ( iHole );
if ( iHole < m_dQueue.GetLength() )
{
if ( iHole && *m_dQueue[iHole] < *m_dQueue[( iHole - 1 ) / 2] )
ShiftUp ( iHole );
else
ShiftDown ( iHole );
}
pTask->m_iTimeoutIdx = -1;
}
EnqueuedTimeout_t* TimeoutQueue_c::Root() const
{
if ( m_dQueue.IsEmpty() )
return nullptr;
return m_dQueue[0];
}
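// A minimal usage sketch ( illustrative only; tQueue and tTask are
// hypothetical names, not part of this file ):
//
//	TimeoutQueue_c tQueue;
//	EnqueuedTimeout_t tTask;
//	tTask.m_iTimeoutTimeUS = sphMicroTimer() + 5000000; // fire in ~5 seconds
//	tQueue.Push ( &tTask );                    // O(log N)
//	EnqueuedTimeout_t * pNext = tQueue.Root(); // nearest deadline, O(1)
//	tQueue.Pop();                              // remove it, O(log N)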
CSphString TimeoutQueue_c::DebugDump ( const char* sPrefix ) const
{
StringBuilder_c tBuild;
for ( auto* cTask : m_dQueue )
tBuild.Sprintf ( tBuild.IsEmpty() ? "%p (%l)" : ", %p(%l)", cTask, cTask->m_iTimeoutTimeUS );
CSphString sRes;
if ( !m_dQueue.IsEmpty() )
sRes.SetSprintf ( "%s%d:%s", sPrefix, m_dQueue.GetLength(), tBuild.cstr() );
else
sRes.SetSprintf ( "%sHeap empty.", sPrefix );
return sRes;
}
void TimeoutQueue_c::DebugDump ( const std::function<void ( EnqueuedTimeout_t* )>& fcb ) const
{
for ( auto* cTask : m_dQueue )
fcb ( cTask );
}

//////////////////////////////////////////////////////////////////////////
// file: manticoresoftware_manticoresearch/src/fileio.cpp
//////////////////////////////////////////////////////////////////////////

//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "fileio.h"
#include "sphinxint.h"
#define SPH_READ_NOPROGRESS_CHUNK (32768*1024)
//////////////////////////////////////////////////////////////////////////
CSphAutofile::CSphAutofile ( const CSphString & sName, int iMode, CSphString & sError, bool bTemp )
{
Open ( sName, iMode, sError, bTemp );
}
CSphAutofile::~CSphAutofile()
{
Close();
}
static int AutoFileOpen ( const CSphString & sName, int iMode )
{
int iFD = -1;
#if _WIN32
if ( iMode==SPH_O_READ )
{
intptr_t tFD = (intptr_t)CreateFile ( sName.cstr(), GENERIC_READ , FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL );
iFD = _open_osfhandle ( tFD, 0 );
} else
iFD = ::open ( sName.cstr(), iMode, 0644 );
#else
iFD = ::open ( sName.cstr(), iMode, 0644 );
#endif
return iFD;
}
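// Note: on Windows the read path goes through CreateFile() so the handle is
// opened with FILE_SHARE_DELETE, which lets other processes rename or unlink
// index files that are still being read; a plain ::open() would not grant
// that sharing mode.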
int CSphAutofile::Open ( const CSphString & sName, int iMode, CSphString & sError, bool bTemp )
{
assert ( m_iFD==-1 && m_sFilename.IsEmpty() );
assert ( !sName.IsEmpty() );
m_iFD = AutoFileOpen ( sName, iMode );
m_sFilename = sName; // not exactly sure why this is unconditional; for error reporting later, I suppose
if ( m_iFD<0 )
sError.SetSprintf ( "failed to open %s: %s", sName.cstr(), strerrorm(errno) );
else
m_bTemporary = bTemp; // only if we managed to actually open it
return m_iFD;
}
void CSphAutofile::Close()
{
if ( m_iFD>=0 )
{
::close ( m_iFD );
if ( m_bTemporary )
::unlink ( m_sFilename.cstr() );
}
m_iFD = -1;
m_sFilename = "";
m_bTemporary = false;
}
int CSphAutofile::LeakID ()
{
m_sFilename = "";
m_bTemporary = false;
return std::exchange ( m_iFD, -1 );
}
void CSphAutofile::SetPersistent()
{
m_bTemporary = false;
}
const char * CSphAutofile::GetFilename() const
{
return m_sFilename.scstr();
}
SphOffset_t CSphAutofile::GetSize ( SphOffset_t iMinSize, bool bCheckSizeT, CSphString & sError )
{
struct_stat st;
if ( stat ( GetFilename(), &st )<0 )
{
sError.SetSprintf ( "failed to stat %s: %s", GetFilename(), strerrorm(errno) );
return -1;
}
if ( st.st_size<iMinSize )
{
sError.SetSprintf ( "failed to load %s: bad size " INT64_FMT " (at least " INT64_FMT " bytes expected)",
GetFilename(), (int64_t)st.st_size, (int64_t)iMinSize );
return -1;
}
if ( bCheckSizeT )
{
size_t uCheck = (size_t)st.st_size;
if ( st.st_size!=SphOffset_t(uCheck) )
{
sError.SetSprintf ( "failed to load %s: bad size " INT64_FMT " (out of size_t; 4 GB limit on 32-bit machine hit?)",
GetFilename(), (int64_t)st.st_size );
return -1;
}
}
return st.st_size;
}
SphOffset_t CSphAutofile::GetSize()
{
CSphString sTmp;
return GetSize ( 0, false, sTmp );
}
bool CSphAutofile::Read ( void * pBuf, int64_t iCount, CSphString & sError )
{
assert ( iCount>=0 );
int64_t iToRead = iCount;
BYTE * pCur = (BYTE *)pBuf;
while ( iToRead>0 )
{
int64_t iToReadOnce = Min ( iToRead, SPH_READ_NOPROGRESS_CHUNK );
int64_t iGot = sphRead ( GetFD(), pCur, (size_t)iToReadOnce );
if ( iGot==-1 )
{
// interrupted by a signal - try again
if ( errno==EINTR )
continue;
sError.SetSprintf ( "read error in %s (%s); " INT64_FMT " of " INT64_FMT " bytes read",
GetFilename(), strerrorm(errno), iCount-iToRead, iCount );
return false;
}
// EOF
if ( iGot==0 )
{
sError.SetSprintf ( "unexpected EOF in %s (%s); " INT64_FMT " of " INT64_FMT " bytes read",
GetFilename(), strerrorm(errno), iCount-iToRead, iCount );
return false;
}
iToRead -= iGot;
pCur += iGot;
}
if ( iToRead!=0 )
{
sError.SetSprintf ( "read error in %s (%s); " INT64_FMT " of " INT64_FMT " bytes read",
GetFilename(), strerrorm(errno), iCount-iToRead, iCount );
return false;
}
return true;
}
//////////////////////////////////////////////////////////////////////////
CSphReader::CSphReader ( BYTE * pBuf, int iSize )
: m_pBuff ( pBuf )
, m_iBufSize ( iSize )
, m_iReadUnhinted ( DEFAULT_READ_UNHINTED )
{
assert ( pBuf==NULL || iSize>0 );
}
CSphReader::~CSphReader()
{
if ( m_bBufOwned )
SafeDeleteArray ( m_pBuff );
}
void CSphReader::SetBuffers ( int iReadBuffer, int iReadUnhinted )
{
if ( !m_pBuff )
m_iBufSize = iReadBuffer;
m_iReadUnhinted = iReadUnhinted;
}
void CSphReader::SetFile ( int iFD, const char * sFilename )
{
m_iFD = iFD;
m_iPos = 0;
m_iBuffPos = 0;
m_iBuffUsed = 0;
m_sFilename = sFilename;
}
void CSphReader::SetFile ( const CSphAutofile & tFile )
{
SetFile ( tFile.GetFD(), tFile.GetFilename() );
}
void CSphReader::Reset()
{
SetFile ( -1, "" );
}
/// sizehint > 0 means we expect to read approx that much bytes
/// sizehint == 0 means no hint, use default (happens later in UpdateCache())
/// sizehint == -1 means reposition and adjust current hint
void CSphReader::SeekTo ( SphOffset_t iPos, int iSizeHint )
{
assert ( iPos>=0 );
assert ( iSizeHint>=-1 );
#ifndef NDEBUG
#if PARANOID
struct_stat tStat;
fstat ( m_iFD, &tStat );
if ( iPos > tStat.st_size )
sphDie ( "INTERNAL ERROR: seeking past the end of file" );
#endif
#endif
if ( iPos>=m_iPos && iPos<m_iPos+m_iBuffUsed )
{
m_iBuffPos = (int)( iPos-m_iPos ); // reposition to proper byte
m_iSizeHint = iSizeHint - ( m_iBuffUsed - m_iBuffPos ); // we already have some bytes cached, so let's adjust size hint
assert ( m_iBuffPos<m_iBuffUsed );
} else
{
m_iPos = iPos;
m_iBuffPos = 0; // for GetPos() to work properly, aaaargh
m_iBuffUsed = 0;
if ( iSizeHint==-1 )
{
// the adjustment bureau
// we need to seek but still keep the current hint
// happens on a skiplist jump, for instance
int64_t iHintLeft = m_iPos + m_iSizeHint - iPos;
if ( iHintLeft>0 && iHintLeft<INT_MAX )
iSizeHint = (int)iHintLeft;
else
iSizeHint = 0;
}
// get that hint
assert ( iSizeHint>=0 );
m_iSizeHint = iSizeHint;
}
}
void CSphReader::SkipBytes ( int iCount )
{
// 0 means "no hint", so this clamp works alright
SeekTo ( m_iPos+m_iBuffPos+iCount, Max ( m_iSizeHint-m_iBuffPos-iCount, 0 ) );
}
void CSphReader::UpdateCache()
{
CSphScopedProfile tProf ( m_pProfile, m_eProfileState );
assert ( m_iFD>=0 );
// alloc buf on first actual read
if ( !m_pBuff )
{
if ( m_iBufSize<=0 )
m_iBufSize = DEFAULT_READ_BUFFER;
m_bBufOwned = true;
m_pBuff = new BYTE [ m_iBufSize ];
}
// stream position could be changed externally
// so let's just hope that the OS optimizes redundant seeks
SphOffset_t iNewPos = m_iPos + Min ( m_iBuffPos, m_iBuffUsed );
if ( m_iSizeHint<=0 )
m_iSizeHint = ( m_iReadUnhinted>0 ) ? m_iReadUnhinted : DEFAULT_READ_UNHINTED;
int iReadLen = Min ( m_iSizeHint, m_iBufSize );
m_iBuffPos = 0;
m_iBuffUsed = sphPread ( m_iFD, m_pBuff, iReadLen, iNewPos ); // FIXME! what about throttling?
if ( m_iBuffUsed<0 )
{
m_iBuffUsed = m_iBuffPos = 0;
m_bError = true;
m_sError.SetSprintf ( "pread error in %s: pos=" INT64_FMT ", len=%d, code=%d, msg=%s", m_sFilename.cstr(), (int64_t)iNewPos, iReadLen, errno, strerror(errno) );
return;
}
// all fine, adjust offset and hint
m_iSizeHint -= m_iBuffUsed;
m_iPos = iNewPos;
}
int CSphReader::GetByte()
{
if ( m_iBuffPos>=m_iBuffUsed )
{
UpdateCache();
if ( m_iBuffPos>=m_iBuffUsed )
{
m_bError = true;
m_sError.SetSprintf ( "pread error in %s: pos=" INT64_FMT ", len=%d", m_sFilename.cstr(), (int64_t)m_iPos, 1 );
return 0; // unexpected io failure
}
}
assert ( m_iBuffPos<m_iBuffUsed );
return m_pBuff [ m_iBuffPos++ ];
}
void CSphReader::GetBytes ( void * pData, int iSize )
{
BYTE * pOut = (BYTE*) pData;
while ( iSize>m_iBufSize )
{
int iLen = m_iBuffUsed - m_iBuffPos;
assert ( iLen<=m_iBufSize );
memcpy ( pOut, m_pBuff+m_iBuffPos, iLen );
m_iBuffPos += iLen;
pOut += iLen;
iSize -= iLen;
m_iSizeHint = Max ( m_iReadUnhinted, iSize );
if ( iSize>0 )
{
UpdateCache();
if ( !m_iBuffUsed )
{
m_sError.SetSprintf ( "pread error in %s: pos=" INT64_FMT ", len=%d, code=%d, msg=%s", m_sFilename.cstr(), (int64_t)m_iPos, iSize, errno, strerror(errno) );
memset ( pData, 0, iSize );
return; // unexpected io failure
}
}
}
if ( iSize>m_iBuffUsed-m_iBuffPos )
{
// drain the remaining buffered tail to the output before refilling the cache
const int iLen = m_iBuffUsed - m_iBuffPos;
if ( iLen>0 )
{
memcpy ( pOut, m_pBuff+m_iBuffPos, iLen );
m_iBuffPos += iLen;
pOut += iLen;
iSize -= iLen;
}
m_iSizeHint = Max ( m_iReadUnhinted, iSize );
UpdateCache();
if ( iSize>m_iBuffUsed-m_iBuffPos )
{
memset ( pData, 0, iSize ); // unexpected io failure
m_bError = true;
m_sError.SetSprintf ( "pread error in %s: pos=" INT64_FMT ", len=%d", m_sFilename.cstr(), (int64_t)m_iPos, iSize );
return;
}
}
assert ( (m_iBuffPos+iSize)<=m_iBuffUsed );
memcpy ( pOut, m_pBuff+m_iBuffPos, iSize );
m_iBuffPos += iSize;
}
int CSphReader::GetLine ( char * sBuffer, int iMaxLen )
{
int iOutPos = 0;
iMaxLen--; // reserve space for trailing '\0'
// grab as many chars as we can
while ( iOutPos<iMaxLen )
{
// read next chunk if necessary
if ( m_iBuffPos>=m_iBuffUsed )
{
UpdateCache();
if ( m_iBuffPos>=m_iBuffUsed )
{
if ( iOutPos==0 ) return -1; // current line is empty; indicate eof
break; // return current line; will return eof next time
}
}
// break on CR or LF
if ( m_pBuff[m_iBuffPos]=='\r' || m_pBuff[m_iBuffPos]=='\n' )
break;
// one more valid char
sBuffer[iOutPos++] = m_pBuff[m_iBuffPos++];
}
// skip everything until the newline or eof
while (true)
{
// read next chunk if necessary
if ( m_iBuffPos>=m_iBuffUsed )
UpdateCache();
// eof?
if ( m_iBuffPos>=m_iBuffUsed )
break;
// newline?
if ( m_pBuff[m_iBuffPos++]=='\n' )
break;
}
// finalize
sBuffer[iOutPos] = '\0';
return iOutPos;
}
void CSphReader::ResetError()
{
m_bError = false;
m_sError = "";
}
SphOffset_t CSphReader::GetFilesize() const
{
assert ( m_iFD>=0 );
return sphGetFileSize ( m_iFD, nullptr );
}
#if TRACE_UNZIP
std::array<std::atomic<uint64_t>, 5> CSphReader::m_dZip32Stats = { 0 };
std::array<std::atomic<uint64_t>, 10> CSphReader::m_dZip64Stats = { 0 };
DWORD CSphReader::UnzipInt()
{
DWORD uRes = UnzipValueBE<DWORD> ( [this]() mutable { return GetByte(); } );
m_dZip32Stats[sphCalcZippedLen ( uRes ) - 1].fetch_add ( 1, std::memory_order_relaxed );
return uRes;
}
uint64_t CSphReader::UnzipOffset()
{
uint64_t uRes = UnzipValueBE<uint64_t> ( [this]() mutable { return GetByte(); } );
m_dZip64Stats[sphCalcZippedLen ( uRes ) - 1].fetch_add ( 1, std::memory_order_relaxed );
return uRes;
}
#else
DWORD CSphReader::UnzipInt()
{
return UnzipValueBE<DWORD> ( [this]() mutable { return GetByte(); } );
}
uint64_t CSphReader::UnzipOffset()
{
return UnzipValueBE<uint64_t> ( [this]() mutable { return GetByte(); } );
}
#endif
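// Worked example of the variable-length ( zip ) encoding read above, assuming
// the classic big-endian 7-bit scheme: each byte carries 7 payload bits, the
// high bit marks a continuation, and the most significant group comes first.
// So 4660 ( 0x1234 ) splits into the 7-bit groups 0x24 and 0x34 and is stored
// as the two bytes 0xA4 0x34, while any value under 128 fits in one byte.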
CSphReader & CSphReader::operator = ( const CSphReader & rhs )
{
SetFile ( rhs.m_iFD, rhs.m_sFilename.cstr() );
SeekTo ( rhs.m_iPos + rhs.m_iBuffPos, rhs.m_iSizeHint );
return *this;
}
DWORD CSphReader::GetDword()
{
DWORD uRes = 0;
GetBytes ( &uRes, sizeof(DWORD) );
return uRes;
}
SphOffset_t CSphReader::GetOffset()
{
SphOffset_t uRes = 0;
GetBytes ( &uRes, sizeof(SphOffset_t) );
return uRes;
}
CSphString CSphReader::GetString()
{
CSphString sRes;
DWORD uLen = GetDword ();
if ( uLen )
{
sRes.Reserve ( uLen );
GetBytes ( (BYTE *) sRes.cstr (), uLen );
}
return sRes;
}
CSphString CSphReader::GetZString ()
{
CSphString sRes;
auto uLen = UnzipOffset();
if ( uLen )
{
sRes.Reserve ( uLen );
GetBytes ( (BYTE *) sRes.cstr (), uLen );
}
return sRes;
}
bool CSphReader::Tag ( const char * sTag )
{
if ( m_bError )
return false;
assert ( sTag && *sTag ); // empty tags are nonsense
assert ( strlen(sTag)<64 ); // huge tags are nonsense
auto iLen = (int) strlen(sTag);
char sBuf[64];
GetBytes ( sBuf, iLen );
if ( !memcmp ( sBuf, sTag, iLen ) )
return true;
m_bError = true;
m_sError.SetSprintf ( "expected tag %s was not found", sTag );
return false;
}
//////////////////////////////////////////////////////////////////////////
bool CSphAutoreader::Open ( const CSphString & sFilename, CSphString & sError )
{
assert ( m_iFD<0 );
assert ( !sFilename.IsEmpty() );
m_iFD = AutoFileOpen ( sFilename, SPH_O_READ );
m_iPos = 0;
m_iBuffPos = 0;
m_iBuffUsed = 0;
m_sFilename = sFilename;
if ( m_iFD<0 )
sError.SetSprintf ( "failed to open %s: %s", sFilename.cstr(), strerror(errno) );
return ( m_iFD>=0 );
}
void CSphAutoreader::Close()
{
if ( m_iFD>=0 )
::close ( m_iFD );
m_iFD = -1;
}
SphOffset_t FileReader_c::GetFilesize() const
{
assert ( m_iFD>=0 );
return sphGetFileSize ( m_iFD, nullptr );
}
//////////////////////////////////////////////////////////////////////////
void CSphWriter::SetBufferSize ( int iBufferSize )
{
if ( iBufferSize!=m_iBufferSize )
{
m_iBufferSize = Max ( iBufferSize, 262144 );
m_pBuffer = nullptr;
}
}
bool CSphWriter::OpenFile ( const CSphString & sName, CSphString & sErrorBuffer )
{
return OpenFile ( sName, SPH_O_NEW, sErrorBuffer );
}
bool CSphWriter::OpenFile ( const CSphString & sName, int iOpenFlags, CSphString & sErrorBuffer )
{
assert ( !sName.IsEmpty() );
assert ( m_iFD<0 && "already open" );
m_bOwnFile = true;
m_sName = sName;
m_pError = &sErrorBuffer;
if ( !m_pBuffer )
m_pBuffer = std::make_unique<BYTE[]> ( m_iBufferSize );
m_iFD = ::open ( m_sName.cstr(), iOpenFlags, 0644 );
m_pPool = m_pBuffer.get();
m_iPoolUsed = 0;
m_iPos = 0;
m_iDiskPos = 0;
m_bError = ( m_iFD<0 );
if ( m_bError )
m_pError->SetSprintf ( "failed to create %s: %s" , sName.cstr(), strerror(errno) );
return !m_bError;
}
void CSphWriter::SetFile ( CSphAutofile & tAuto, SphOffset_t * pSharedOffset, CSphString & sError )
{
assert ( m_iFD<0 && "already open" );
m_bOwnFile = false;
if ( !m_pBuffer )
m_pBuffer = std::make_unique<BYTE[]> ( m_iBufferSize );
m_iFD = tAuto.GetFD();
m_sName = tAuto.GetFilename();
m_pPool = m_pBuffer.get();
m_iPoolUsed = 0;
m_iPos = 0;
m_iDiskPos = 0;
m_pSharedOffset = pSharedOffset;
m_pError = &sError;
assert ( m_pError );
}
CSphWriter::~CSphWriter()
{
if ( m_bUnlinkNonClosed && m_bOwnFile )
{
if ( m_iFD >= 0 )
::close ( m_iFD );
::unlink ( m_sName.cstr() );
} else
CloseFile();
}
void CSphWriter::CloseFile ( bool bTruncate )
{
if ( m_iFD>=0 )
{
Flush();
if ( bTruncate )
sphTruncate ( m_iFD );
if ( m_bOwnFile )
::close ( m_iFD );
m_iFD = -1;
m_bUnlinkNonClosed = m_bError;
}
}
void CSphWriter::UpdatePoolUsed()
{
if ( m_pPool-m_pBuffer.get() > m_iPoolUsed )
m_iPoolUsed = m_pPool- m_pBuffer.get();
}
void CSphWriter::PutByte ( BYTE uValue )
{
assert ( m_pPool );
if ( m_iPoolUsed==m_iBufferSize )
Flush();
*m_pPool++ = uValue;
UpdatePoolUsed();
m_iPos++;
}
void CSphWriter::PutBytes ( const void * pData, int64_t iSize )
{
assert ( m_pPool );
const BYTE * pBuf = (const BYTE *) pData;
while ( iSize>0 )
{
int iPut = ( iSize<m_iBufferSize ? int(iSize) : m_iBufferSize ); // comparison int64 to int32
if ( m_iPoolUsed+iPut>m_iBufferSize )
Flush();
assert ( m_iPoolUsed+iPut<=m_iBufferSize );
memcpy ( m_pPool, pBuf, iPut );
m_pPool += iPut;
UpdatePoolUsed();
m_iPos += iPut;
pBuf += iPut;
iSize -= iPut;
}
}
void CSphWriter::ZipInt ( DWORD uValue )
{
ZipValueBE ( [this] ( BYTE b ) { PutByte ( b ); }, uValue );
}
void CSphWriter::ZipOffset ( uint64_t uValue )
{
ZipValueBE ( [this] ( BYTE b ) { PutByte ( b ); }, uValue );
}
void CSphWriter::Flush()
{
if ( m_pSharedOffset && *m_pSharedOffset!=m_iDiskPos )
{
auto uMoved = sphSeek ( m_iFD, m_iDiskPos, SEEK_SET );
if ( uMoved!= m_iDiskPos )
{
m_bError = true;
return;
}
}
if ( !sphWriteThrottled ( m_iFD, m_pBuffer.get(), m_iPoolUsed, m_sName.cstr(), *m_pError ) )
m_bError = true;
m_iDiskPos += m_iPoolUsed;
m_iPoolUsed = 0;
m_pPool = m_pBuffer.get();
if ( m_pSharedOffset )
*m_pSharedOffset = m_iDiskPos;
}
void CSphWriterNonThrottled::Flush ()
{
if ( m_pSharedOffset && *m_pSharedOffset!=m_iDiskPos )
{
auto uMoved = sphSeek ( m_iFD, m_iDiskPos, SEEK_SET );
if ( uMoved!=m_iDiskPos )
{
m_bError = true;
return;
}
}
if ( !WriteNonThrottled ( m_iFD, m_pBuffer.get (), m_iPoolUsed, m_sName.cstr (), *m_pError ) )
m_bError = true;
m_iDiskPos += m_iPoolUsed;
m_iPoolUsed = 0;
m_pPool = m_pBuffer.get ();
if ( m_pSharedOffset )
*m_pSharedOffset = m_iDiskPos;
}
void CSphWriter::PutString ( const char * szString )
{
int iLen = szString ? (int) strlen ( szString ) : 0;
PutDword ( iLen );
if ( iLen )
PutBytes ( szString, iLen );
}
void CSphWriter::PutString ( const CSphString & sString )
{
int iLen = sString.Length();
PutDword ( iLen );
if ( iLen )
PutBytes ( sString.cstr(), iLen );
}
void CSphWriter::PutZString ( const char * szString )
{
int iLen = szString ? (int) strlen ( szString ) : 0;
ZipOffset ( iLen );
if ( iLen )
PutBytes ( szString, iLen );
}
void CSphWriter::PutZString ( const CSphString & sString )
{
int iLen = sString.Length ();
ZipOffset ( iLen );
if ( iLen )
PutBytes ( sString.cstr (), iLen );
}
void CSphWriter::Tag ( const char * sTag )
{
assert ( sTag && *sTag ); // empty tags are nonsense
assert ( strlen(sTag)<64 ); // huge tags are nonsense
PutBytes ( sTag, strlen(sTag) );
}
bool SeekAndWarn ( int iFD, SphOffset_t iPos, const char * szWarnPrefix )
{
assert ( szWarnPrefix );
auto iSeek = sphSeek ( iFD, iPos, SEEK_SET );
if ( iSeek!=iPos )
{
if ( iSeek<0 )
sphWarning ( "%s : seek error. Error: %d '%s'", szWarnPrefix, errno, strerrorm (errno) );
else
sphWarning ( "%s : seek error. Expected: " INT64_FMT ", got " INT64_FMT, szWarnPrefix, (int64_t) iPos, (int64_t) iSeek );
return false;
}
assert ( iSeek==iPos );
return true;
}
void CSphWriter::SeekTo ( SphOffset_t iPos, bool bTruncate )
{
assert ( iPos>=0 );
if ( iPos>=m_iDiskPos && iPos<=( m_iDiskPos + m_iPoolUsed ) )
{
// seeking inside the buffer
// m_iPoolUsed should be always in sync with m_iPos
// or it breaks seek back at cidxHit
m_iPoolUsed = (int)( iPos - m_iDiskPos );
m_pPool = m_pBuffer.get() + m_iPoolUsed;
} else
{
Flush();
SeekAndWarn ( m_iFD, iPos, "CSphWriter::SeekTo" );
if ( bTruncate )
sphTruncate(m_iFD);
m_pPool = m_pBuffer.get();
m_iPoolUsed = 0;
m_iDiskPos = iPos;
}
m_iPos = iPos;
}
//////////////////////////////////////////////////////////////////////////
static int g_iIOpsDelay = 0;
static int g_iMaxIOSize = 0;
static std::atomic<int64_t> g_tmNextIOTime { 0 };
void sphSetThrottling ( int iMaxIOps, int iMaxIOSize )
{
g_iIOpsDelay = iMaxIOps ? 1000000 / iMaxIOps : iMaxIOps;
g_iMaxIOSize = iMaxIOSize;
}
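// Worked example: with iMaxIOps = 200 the delay becomes 1000000 / 200 = 5000
// microseconds, i.e. ThrottleSleep() below spaces I/O calls at least 5 ms
// apart; iMaxIOps = 0 disables throttling entirely.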
static inline void ThrottleSleep()
{
if ( !g_iIOpsDelay )
return;
auto tmTimer = sphMicroTimer();
while ( tmTimer < g_tmNextIOTime.load ( std::memory_order_relaxed ) ) // may sleep more than once if another thread was luckier
{
sphSleepMsec ( (int)( g_tmNextIOTime.load ( std::memory_order_relaxed ) - tmTimer ) / 1000 );
tmTimer = sphMicroTimer();
}
g_tmNextIOTime.store ( tmTimer + g_iIOpsDelay, std::memory_order_relaxed );
}
bool sphWriteThrottled ( int iFD, const void* pBuf, int64_t iCount, const char* sName, CSphString& sError )
{
if ( iCount <= 0 )
return true;
// by default, slice ios by at most 1 GB
int iChunkSize = ( 1UL << 30 );
// when there's a sane max_iosize (4K to 1GB), use it
if ( g_iMaxIOSize >= 4096 )
iChunkSize = Min ( iChunkSize, g_iMaxIOSize );
CSphIOStats* pIOStats = GetIOStats();
int64_t iTotalWritten = 0;
const int64_t iTotalCount = iCount;
// while there's data, write it chunk by chunk
auto* p = (const BYTE*)pBuf;
while ( iCount )
{
// wait for a timely occasion
ThrottleSleep();
// write (and maybe time)
int64_t tmTimer = 0;
if ( pIOStats )
tmTimer = sphMicroTimer();
auto iToWrite = (int)Min ( iCount, iChunkSize );
auto iWritten = (int)::write ( iFD, &p[iTotalWritten], iToWrite );
if ( pIOStats )
{
pIOStats->m_iWriteTime += sphMicroTimer() - tmTimer;
pIOStats->m_iWriteOps++;
pIOStats->m_iWriteBytes += iWritten;
}
if ( sphInterrupted() && iWritten != iToWrite )
{
sError.SetSprintf ( "%s: write interrupted: %d of %d bytes written", sName, iWritten, iToWrite );
return false;
}
// failure? report, bailout
if ( iWritten<0 )
{
// if nothing got written, a plain message is enough; otherwise report the progress made
if ( iTotalWritten==0 )
sError.SetSprintf ( "%s: write error: %s", sName, strerrorm ( errno ) );
else
sError.SetSprintf ( "%s: write error: %s; " INT64_FMT " of " INT64_FMT " bytes written", sName, strerrorm ( errno ), iTotalWritten, iTotalCount );
return false;
}
// success? rinse, repeat
iCount -= iWritten;
iTotalWritten += iWritten;
}
return true;
}
bool WriteNonThrottled ( int iFD, const void * pBuf, int64_t iCount, const char * sName, CSphString & sError )
{
if ( iCount<=0 )
return true;
CSphIOStats * pIOStats = GetIOStats ();
int64_t iTotalWritten = 0;
const int64_t iTotalCount = iCount;
// while there's data, write it chunk by chunk
auto * p = (const BYTE *) pBuf;
while ( iCount )
{
int64_t tmTimer = 0;
if ( pIOStats )
tmTimer = sphMicroTimer ();
auto iToWrite = (int) Min ( iCount, 1UL << 30 );
auto iWritten = (int) ::write ( iFD, &p[iTotalWritten], iToWrite );
if ( pIOStats )
{
pIOStats->m_iWriteTime += sphMicroTimer ()-tmTimer;
pIOStats->m_iWriteOps++;
pIOStats->m_iWriteBytes += iWritten;
}
if ( sphInterrupted () && iWritten!=iToWrite )
{
sError.SetSprintf ( "%s: write interrupted: %d of %d bytes written", sName, iWritten, iToWrite );
return false;
}
// failure? report, bailout
if ( iWritten<0 )
{
// if nothing got written, a plain message is enough; otherwise report the progress made
if ( iTotalWritten==0 )
sError.SetSprintf ( "%s: write error: %s", sName, strerrorm ( errno ) );
else
sError.SetSprintf ( "%s: write error: %s; " INT64_FMT " of " INT64_FMT " bytes written", sName, strerrorm ( errno ), iTotalWritten, iTotalCount );
return false;
}
// success? rinse, repeat
iCount -= iWritten;
iTotalWritten += iWritten;
}
return true;
}
size_t sphReadThrottled ( int iFD, void* pBuf, size_t iCount )
{
if ( iCount <= 0 )
return iCount;
auto iStep = g_iMaxIOSize ? Min ( iCount, (size_t)g_iMaxIOSize ) : iCount;
auto* p = (BYTE*)pBuf;
size_t nBytesToRead = iCount;
while ( iCount && !sphInterrupted() )
{
ThrottleSleep();
auto iChunk = (long)Min ( iCount, iStep );
auto iRead = sphRead ( iFD, p, iChunk );
p += iRead;
iCount -= iRead;
if ( iRead != iChunk )
break;
}
return nBytesToRead - iCount; // FIXME? we sure this is under 2gb?
}
//////////////////////////////////////////////////////////////////////////
#if _WIN32
// atomic seek+read for Windows
int sphPread ( int iFD, void * pBuf, int iBytes, SphOffset_t iOffset )
{
if ( iBytes==0 )
return 0;
CSphIOStats * pIOStats = GetIOStats();
int64_t tmStart = 0;
if ( pIOStats )
tmStart = sphMicroTimer();
HANDLE hFile;
hFile = (HANDLE) _get_osfhandle ( iFD );
if ( hFile==INVALID_HANDLE_VALUE )
return -1;
STATIC_SIZE_ASSERT ( SphOffset_t, 8 );
OVERLAPPED tOverlapped = { 0 };
tOverlapped.Offset = (DWORD)( iOffset & I64C(0xffffffff) );
tOverlapped.OffsetHigh = (DWORD)( iOffset>>32 );
DWORD uRes;
if ( !ReadFile ( hFile, pBuf, iBytes, &uRes, &tOverlapped ) )
{
DWORD uErr = GetLastError();
if ( uErr==ERROR_HANDLE_EOF )
return 0;
errno = uErr; // FIXME! should remap from Win to POSIX
return -1;
}
if ( pIOStats )
{
pIOStats->m_iReadTime += sphMicroTimer() - tmStart;
pIOStats->m_iReadOps++;
pIOStats->m_iReadBytes += iBytes;
}
return uRes;
}
#else
#if HAVE_PREAD
// atomic seek+read for non-Windows systems with pread() call
int sphPread ( int iFD, void * pBuf, int iBytes, SphOffset_t iOffset )
{
CSphIOStats * pIOStats = GetIOStats();
if ( !pIOStats )
return ::pread ( iFD, pBuf, iBytes, iOffset );
int64_t tmStart = sphMicroTimer();
int iRes = (int) ::pread ( iFD, pBuf, iBytes, iOffset );
if ( pIOStats )
{
pIOStats->m_iReadTime += sphMicroTimer() - tmStart;
pIOStats->m_iReadOps++;
pIOStats->m_iReadBytes += iBytes;
}
return iRes;
}
#else
// generic fallback; prone to races between seek and read
int sphPread ( int iFD, void * pBuf, int iBytes, SphOffset_t iOffset )
{
if ( sphSeek ( iFD, iOffset, SEEK_SET )==-1 )
return -1;
	return (int)sphReadThrottled ( iFD, pBuf, iBytes ); // matches the 3-arg signature declared above
}
#endif // HAVE_PREAD
#endif // _WIN32
| 24,887 | C++ | .cpp | 889 | 25.559055 | 176 | 0.658563 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,890 | threads_detached.cpp | manticoresoftware_manticoresearch/src/threads_detached.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "threadutils.h"
#include <csignal>
using namespace Threads;
namespace {
RwLock_t& g_dDetachedGuard()
{
static RwLock_t dDetachedGuard;
return dDetachedGuard;
}
CSphVector<LowThreadDesc_t *>& g_dDetachedThreads()
{
static CSphVector<LowThreadDesc_t *> dDetachedThreads GUARDED_BY ( g_dDetachedGuard () );
return dDetachedThreads;
}
}
// walk over list of running detached threads and apply fnHandler to each of them
// Each call made under r-lock to keep thread list intact
void Iterate ( ThreadFN& fnHandler )
{
ScRL_t _ ( g_dDetachedGuard () );
for ( auto * pThread : g_dDetachedThreads () )
fnHandler ( pThread );
}
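// Illustrative usage sketch (editor's addition; fnDump is a hypothetical
// name). A caller could log every detached thread via the iterator; the
// handler runs under the r-lock, so it must not block or re-enter this module.
//
//	ThreadFN fnDump = [] ( LowThreadDesc_t * pThread )
//	{
//		sphInfo ( "detached: '%s' (tid %d)", pThread->m_sThreadName.cstr(), pThread->m_iThreadID );
//	};
//	Iterate ( fnDump );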
// make the iterator over the list of running detached threads available right now.
// (a shutdown action that would walk the list, send SIGTERM to each thread and
// then wait until they're finished is sketched below, but currently disabled)
void Detached::MakeAloneIteratorAvailable ()
{
#ifndef NDEBUG
static bool bAlreadyInvoked = false;
assert ( !bAlreadyInvoked );
bAlreadyInvoked = true;
#endif
Threads::RegisterIterator ( Iterate );
	// the limitation is all about Windows, since we use pthread_kill right now.
	// if an analogue exists there, the limitation can be removed.
//#if !_WIN32
// searchd::AddShutdownCb ( []
// {
// Detached::ShutdownAllAlones();
// });
//#endif
}
static int64_t g_tmShutdownAllAlonesDelta = 3; // max allowed wait in seconds
void Detached::ShutdownAllAlones()
{
#if !_WIN32
int iThreads;
{
ScRL_t _ ( g_dDetachedGuard() );
iThreads = g_dDetachedThreads().GetLength();
}
int iTurn = 1;
int64_t tmStart = sphMicroTimer();
int64_t tmEnd = tmStart + g_tmShutdownAllAlonesDelta * 1000000;
while ( iThreads > 0 )
{
{
ScRL_t _ ( g_dDetachedGuard() );
sphWarning ( "ShutdownAllAlones will kill %d threads", iThreads );
for ( auto* pThread : g_dDetachedThreads() )
{
if ( pThread )
{
sphInfo ( "Kill thread '%s' with id %d, try %d",
pThread->m_sThreadName.cstr(),
pThread->m_iThreadID,
iTurn );
pthread_kill ( pThread->m_tThread, SIGTERM );
}
}
}
auto iStart = 0;
while ( true )
{
{
ScRL_t _ ( g_dDetachedGuard() );
iThreads = g_dDetachedThreads().GetLength();
}
if ( iThreads <= 0 )
break;
sphSleepMsec ( 50 );
iStart += 50;
if ( iStart >= 10000 ) // wait 10 seconds between tries
{
sphWarning ( "ShutdownAllAlones catch still has %d alone threads", iThreads );
break;
}
}
++iTurn;
int64_t tmCur = sphMicroTimer();
if ( tmCur>tmEnd )
{
sphWarning ( "ShutdownAllAlones exits by timeout (%.3f seconds) but still has %d alone threads", ( (tmCur-tmStart)/1000000.0f ), iThreads );
break;
}
}
#endif
}
void Detached::AddThread ( LowThreadDesc_t* pThread )
{
ScWL_t _ ( g_dDetachedGuard() );
sphLogDebug ( "Detached::AddThread called for '%s', tid %d",
pThread->m_sThreadName.cstr(), pThread->m_iThreadID );
g_dDetachedThreads ().Add ( pThread );
}
void Detached::RemoveThread ( LowThreadDesc_t* pVictim )
{
sphLogDebug ( "Detached::RemoveThread called for %d", pVictim->m_iThreadID );
ScWL_t _ ( g_dDetachedGuard() );
ARRAY_FOREACH ( i, g_dDetachedThreads() )
{
auto pThread = g_dDetachedThreads ()[i];
if ( Threads::Same ( pThread, pVictim ) )
{
sphLogDebug ( "Terminated thread %d, '%s'", pThread->m_iThreadID, pThread->m_sThreadName.cstr () );
g_dDetachedThreads().RemoveFast ( i );
return;
}
}
}
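// Illustrative lifecycle sketch (editor's addition; DetachedThreadBody is a
// hypothetical name). A detached thread is expected to register itself on
// startup and deregister on exit:
//
//	void DetachedThreadBody ( LowThreadDesc_t * pSelf )
//	{
//		Detached::AddThread ( pSelf );		// visible to Iterate()/ShutdownAllAlones()
//		// ... do the work, reacting to SIGTERM ...
//		Detached::RemoveThread ( pSelf );	// must run before the descriptor dies
//	}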
| 3,871 | C++ | .cpp | 132 | 26.666667 | 143 | 0.698844 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,891 | searchdssl.cpp | manticoresoftware_manticoresearch/src/searchdssl.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "searchdssl.h"
#if WITH_SSL
#ifdef DAEMON
#include "sphinxstd.h"
#include <openssl/ssl.h>
#include <openssl/err.h>
#include <openssl/bio.h>
#include <memory>
static CSphString g_sSslCert;
static CSphString g_sSslKey;
static CSphString g_sSslCa;
#if not defined OPENSSL_API_COMPAT or OPENSSL_API_COMPAT >= 0x10100000L
// needed by OpenSSL
struct CRYPTO_dynlock_value
{
CSphMutex m_tLock;
};
static CSphFixedVector<CSphMutex> g_dSslLocks { 0 };
static void fnSslLock ( int iMode, int iLock, const char*, int )
{
if ( iMode & CRYPTO_LOCK )
g_dSslLocks[iLock].Lock();
else
g_dSslLocks[iLock].Unlock();
}
static CRYPTO_dynlock_value* fnSslLockDynCreate ( const char*, int )
{
auto* pLock = new CRYPTO_dynlock_value;
return pLock;
}
static void fnSslLockDyn ( int iMode, CRYPTO_dynlock_value* pLock, const char*, int )
{
assert ( pLock );
if ( iMode & CRYPTO_LOCK )
pLock->m_tLock.Lock();
else
pLock->m_tLock.Unlock();
}
static void fnSslLockDynDestroy ( CRYPTO_dynlock_value* pLock, const char*, int )
{
SafeDelete ( pLock );
}
inline static void ResetSslLocks ( int iLocks )
{
g_dSslLocks.Reset ( iLocks );
}
#else
inline static void ResetSslLocks ( int iLocks ) {}
#endif
static BIO_METHOD * BIO_s_coroAsync ( bool bDestroy = false );
static int fnSslError ( const char * pStr, size_t iLen, void * pError )
{
// trim line ending from string end
while ( iLen && sphIsSpace ( pStr[iLen-1] ) )
iLen--;
if ( pError )
( (CSphString *)pError )->SetSprintf ( "%.*s", (int)iLen, pStr );
else
sphWarning ( "%.*s", (int)iLen, pStr );
return 1;
}
#define FBLACK "\x1b[30m"
#define FRED "\x1b[31m"
#define FGREEN "\x1b[32m"
#define FYELLOW "\x1b[33m"
#define FCYAN "\x1b[36m"
#define FPURPLE "\x1b[35m"
#define FBLUE "\x1b[34m"
#define FWHITE "\x1b[37m"
#define NORM "\x1b[0m"
#define FRONT FRED
#define FRONTN FPURPLE
#define BACK FGREEN
#define BACKN FYELLOW
#define SYSN FCYAN
void SetServerSSLKeys ( const CSphString & sSslCert, const CSphString & sSslKey, const CSphString & sSslCa )
{
g_sSslCert = sSslCert;
g_sSslKey = sSslKey;
g_sSslCa = sSslCa;
}
static bool IsKeysSet()
{
return !( g_sSslCert.IsEmpty () && g_sSslKey.IsEmpty () && g_sSslCa.IsEmpty ());
}
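// Illustrative usage sketch (editor's addition; the paths below are
// hypothetical). The daemon is expected to push the configured paths once at
// startup and then probe TLS availability:
//
//	SetServerSSLKeys ( "/etc/manticore/server.crt", "/etc/manticore/server.key", "/etc/manticore/ca.pem" );
//	CSphString sError;
//	if ( !CheckWeCanUseSSL ( &sError ) )
//		sphWarning ( "TLS unavailable: %s", sError.cstr() );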
// set SSL key, certificate and ca-certificate to global SSL context
static bool SetGlobalKeys ( SSL_CTX * pCtx, CSphString * pError )
{
	if ( !IsKeysSet() )
return false;
if ( !g_sSslCert.IsEmpty () && SSL_CTX_use_certificate_file ( pCtx, g_sSslCert.cstr (), SSL_FILETYPE_PEM )<=0 )
{
ERR_print_errors_cb ( &fnSslError, pError );
return false;
}
if ( !g_sSslKey.IsEmpty () && SSL_CTX_use_PrivateKey_file ( pCtx, g_sSslKey.cstr (), SSL_FILETYPE_PEM )<=0 )
{
ERR_print_errors_cb ( &fnSslError, pError );
return false;
}
if ( !g_sSslCa.IsEmpty () && SSL_CTX_load_verify_locations ( pCtx, g_sSslCa.cstr(), nullptr )<=0 )
{
ERR_print_errors_cb ( &fnSslError, pError );
return false;
}
// check key and certificate file match
if ( SSL_CTX_check_private_key( pCtx )!=1 )
{
ERR_print_errors_cb ( &fnSslError, pError );
return false;
}
return true;
}
// free SSL related data
static void SslFreeCtx ( SSL_CTX * pCtx )
{
if ( !pCtx )
return;
SSL_CTX_free ( pCtx );
pCtx = nullptr;
CRYPTO_set_locking_callback ( nullptr );
CRYPTO_set_dynlock_create_callback ( nullptr );
CRYPTO_set_dynlock_lock_callback ( nullptr );
CRYPTO_set_dynlock_destroy_callback ( nullptr );
EVP_cleanup();
CRYPTO_cleanup_all_ex_data();
ERR_remove_state ( 0 );
ERR_free_strings();
ResetSslLocks ( 0 );
}
using SmartSSL_CTX_t = SharedPtrCustom_t<SSL_CTX>;
// init SSL library and global context by demand
static SmartSSL_CTX_t GetSslCtx ()
{
static SmartSSL_CTX_t pSslCtx;
if ( !pSslCtx )
{
int iLocks = CRYPTO_num_locks();
ResetSslLocks ( iLocks );
CRYPTO_set_locking_callback ( &fnSslLock );
CRYPTO_set_dynlock_create_callback ( &fnSslLockDynCreate );
CRYPTO_set_dynlock_lock_callback ( &fnSslLockDyn );
CRYPTO_set_dynlock_destroy_callback ( &fnSslLockDynDestroy );
SSL_load_error_strings();
SSL_library_init();
const SSL_METHOD * pMode = nullptr;
#if HAVE_TLS_SERVER_METHOD
pMode = TLS_server_method ();
#elif HAVE_TLSV1_2_METHOD
pMode = TLSv1_2_server_method();
#elif HAVE_TLSV1_1_SERVER_METHOD
pMode = TLSv1_1_server_method();
#else
pMode = SSLv23_server_method();
#endif
pSslCtx = SmartSSL_CTX_t ( SSL_CTX_new ( pMode ), [] ( SSL_CTX * pCtx )
{
sphLogDebugv ( BACKN "~~ Releasing ssl context." NORM );
BIO_s_coroAsync ( true );
SslFreeCtx ( pCtx );
});
SSL_CTX_set_verify ( pSslCtx, SSL_VERIFY_NONE, nullptr );
	// schedule callback for final shutdown.
searchd::AddShutdownCb ( [pRefCtx = pSslCtx] {
sphLogDebugv ( BACKN "~~ Shutdowncb called." NORM );
pSslCtx = nullptr;
// pRefCtx will be also deleted going out of scope
} );
}
return pSslCtx;
}
static SmartSSL_CTX_t GetReadySslCtx ( CSphString * pError=nullptr )
{
if ( !IsKeysSet ())
return SmartSSL_CTX_t ( nullptr );
auto pCtx = GetSslCtx ();
if ( !pCtx )
return pCtx;
static bool bKeysLoaded = false;
if ( !bKeysLoaded && SetGlobalKeys ( pCtx, pError ) )
bKeysLoaded = true;
if ( !bKeysLoaded )
return SmartSSL_CTX_t ( nullptr );
return pCtx;
}
// is global SSL context created and keys set
bool CheckWeCanUseSSL ( CSphString * pError )
{
static bool bCheckPerformed = false; // to check only once
static bool bWeCanUseSSL;
if ( bCheckPerformed )
return bWeCanUseSSL;
bCheckPerformed = true;
bWeCanUseSSL = ( GetReadySslCtx ( pError )!=nullptr );
return bWeCanUseSSL;
}
// translates AsyncNetBuffer_c to OpenSSL BIO calls.
class BioAsyncNetAdapter_c
{
std::unique_ptr<AsyncNetBuffer_c> m_pBackend;
GenericOutputBuffer_c& m_tOut;
AsyncNetInputBuffer_c& m_tIn;
public:
explicit BioAsyncNetAdapter_c ( std::unique_ptr<AsyncNetBuffer_c> pSource )
: m_pBackend ( std::move (pSource) )
, m_tOut ( *m_pBackend )
, m_tIn ( *m_pBackend )
{}
int BioRead ( char * pBuf, int iLen )
{
sphLogDebugv ( BACK "<< BioBackRead (%p) for %p, %d, in buf %d" NORM, this, pBuf, iLen, m_tIn.HasBytes () );
if ( !pBuf || iLen<=0 )
return 0;
if ( !m_tIn.ReadFrom ( iLen ))
iLen = -1;
auto dBlob = m_tIn.PopTail ( iLen );
if ( IsEmpty ( dBlob ))
return 0;
memcpy ( pBuf, dBlob.first, dBlob.second );
return dBlob.second;
}
int BioWrite ( const char * pBuf, int iLen )
{
sphLogDebugv ( BACK ">> BioBackWrite (%p) for %p, %d" NORM, this, pBuf, iLen );
if ( !pBuf || iLen<=0 )
return 0;
m_tOut.SendBytes ( pBuf, iLen );
return iLen;
}
long BioCtrl ( int iCmd, long iNum, void * pPtr)
{
long iRes = 0;
switch ( iCmd )
{
case BIO_CTRL_DGRAM_SET_RECV_TIMEOUT: // BIO_CTRL_DGRAM* used for convenience, as something named 'TIMEOUT'
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) set recv tm %lds" NORM, this, long (iNum / S2US) );
m_tIn.SetTimeoutUS ( iNum );
iRes = 1;
break;
case BIO_CTRL_DGRAM_GET_RECV_TIMEOUT:
iRes = (long)m_tIn.GetTimeoutUS();
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) get recv tm %lds" NORM, this, long (iRes / S2US) );
break;
case BIO_CTRL_DGRAM_SET_SEND_TIMEOUT:
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) set send tm %lds" NORM, this, long (iNum / S2US) );
m_tOut.SetWTimeoutUS ( iNum );
iRes = 1;
break;
case BIO_CTRL_DGRAM_GET_SEND_TIMEOUT:
iRes = (long)m_tOut.GetWTimeoutUS();
			sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) get send tm %lds" NORM, this, long (iRes / S2US) );
break;
case BIO_CTRL_FLUSH:
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) flush" NORM, this );
iRes = m_tOut.Flush () ? 1 : -1;
break;
case BIO_CTRL_PENDING:
iRes = Max (0, m_tIn.HasBytes () );
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) read pending, has %ld" NORM, this, iRes );
break;
case BIO_CTRL_EOF:
iRes = m_tIn.GetError() ? 1 : 0;
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) eof, is %ld" NORM, this, iRes );
break;
case BIO_CTRL_WPENDING:
iRes = m_tOut.GetSentCount ();
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) write pending, has %ld" NORM, this, iRes );
break;
case BIO_CTRL_PUSH:
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) push %p, ignore" NORM, this, pPtr );
break;
case BIO_CTRL_POP:
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) pop %p, ignore" NORM, this, pPtr );
break;
default:
sphLogDebugv ( BACKN "~~ BioBackCtrl (%p) with %d, %ld, %p" NORM, this, iCmd, iNum, pPtr );
}
return iRes;
}
};
#if ( OPENSSL_VERSION_NUMBER < 0x1010000fL)
#define BIO_set_shutdown(pBio,CODE) pBio->shutdown = CODE
#define BIO_get_shutdown(pBio) pBio->shutdown
#define BIO_set_init(pBio,CODE) pBio->init = CODE
#define BIO_set_data(pBio,DATA) pBio->ptr = DATA
#define BIO_get_data(pBio) pBio->ptr
#define BIO_meth_free(pMethod) delete pMethod
#define BIO_get_new_index() (24)
#define BIO_meth_set_create(pMethod,pFn) pMethod->create = pFn
#define BIO_meth_set_destroy(pMethod, pFn ) pMethod->destroy = pFn
#define BIO_meth_set_read(pMethod,pFn) pMethod->bread = pFn
#define BIO_meth_set_write(pMethod,pFn) pMethod->bwrite = pFn
#define BIO_meth_set_ctrl(pMethod,pFn) pMethod->ctrl = pFn
inline static BIO_METHOD * BIO_meth_new ( int iType, const char * szName )
{
auto pMethod = new BIO_METHOD;
memset ( pMethod, 0, sizeof ( BIO_METHOD ) );
pMethod->type = iType;
pMethod->name = szName;
return pMethod;
}
#endif // OPENSSL_VERSION_NUMBER dependent code
static int MyBioCreate ( BIO * pBio )
{
sphLogDebugv ( BACKN "~~ MyBioCreate called with %p" NORM, pBio );
BIO_set_shutdown ( pBio, BIO_CLOSE );
BIO_set_init ( pBio, 0 ); // without it write, read will not be called
BIO_set_data ( pBio, nullptr );
BIO_clear_flags ( pBio, ~0 );
return 1;
}
static int MyBioDestroy ( BIO * pBio )
{
sphLogDebugv ( BACKN "~~ MyBioDestroy called with %p" NORM, pBio );
if ( !pBio )
return 0;
auto pAdapter = ( BioAsyncNetAdapter_c*) BIO_get_data ( pBio );
assert ( pAdapter );
SafeDelete ( pAdapter );
if ( BIO_get_shutdown ( pBio ) )
{
BIO_clear_flags ( pBio, ~0 );
BIO_set_init ( pBio, 0 );
}
return 1;
}
static int MyBioWrite ( BIO * pBio, const char * cBuf, int iNum )
{
auto pAdapter = (BioAsyncNetAdapter_c *) BIO_get_data ( pBio );
assert ( pAdapter );
return pAdapter->BioWrite ( cBuf, iNum );
}
static int MyBioRead ( BIO * pBio, char * cBuf, int iNum )
{
auto pAdapter = (BioAsyncNetAdapter_c *) BIO_get_data ( pBio );
assert ( pAdapter );
return pAdapter->BioRead ( cBuf, iNum );
}
static long MyBioCtrl ( BIO * pBio, int iCmd, long iNum, void * pPtr )
{
auto pAdapter = (BioAsyncNetAdapter_c *) BIO_get_data ( pBio );
assert ( pAdapter );
return pAdapter->BioCtrl ( iCmd, iNum, pPtr );
}
static BIO_METHOD * BIO_s_coroAsync ( bool bDestroy )
{
static BIO_METHOD * pMethod = nullptr;
if ( bDestroy && pMethod )
{
sphLogDebugv ( FRONT "~~ BIO_s_coroAsync (%d)" NORM, !!bDestroy );
BIO_meth_free ( pMethod );
pMethod = nullptr;
} else if ( !bDestroy && !pMethod )
{
sphLogDebugv ( FRONT "~~ BIO_s_coroAsync (%d)" NORM, !!bDestroy );
pMethod = BIO_meth_new ( BIO_get_new_index () | BIO_TYPE_DESCRIPTOR | BIO_TYPE_SOURCE_SINK, "async sock coroutine" );
BIO_meth_set_create ( pMethod, MyBioCreate );
BIO_meth_set_destroy ( pMethod, MyBioDestroy );
BIO_meth_set_read ( pMethod, MyBioRead );
BIO_meth_set_write ( pMethod, MyBioWrite );
BIO_meth_set_ctrl ( pMethod, MyBioCtrl );
}
return pMethod;
}
static BIO * BIO_new_coroAsync ( std::unique_ptr<AsyncNetBuffer_c> pSource )
{
auto pBio = BIO_new ( BIO_s_coroAsync ());
BIO_set_data ( pBio, new BioAsyncNetAdapter_c ( std::move ( pSource ) ) );
BIO_set_init ( pBio, 1 );
return pBio;
}
using BIOPtr_c = SharedPtrCustom_t<BIO>;
class AsyncSSBufferedSocket_c final : public AsyncNetBuffer_c
{
BIOPtr_c m_pSslBackend;
int64_t m_iSendTotal = 0;
int64_t m_iReceivedTotal = 0;
bool SendBuffer ( const VecTraits_T<BYTE> & dData ) final
{
assert ( m_pSslBackend );
CSphScopedProfile tProf ( m_pProfile, SPH_QSTATE_NET_WRITE );
sphLogDebugv ( FRONT "~~ BioFrontWrite (%p) %d bytes" NORM, (BIO*)m_pSslBackend, dData.GetLength () );
int iSent = 0;
if ( !dData.IsEmpty ())
iSent = BIO_write ( m_pSslBackend, dData.begin (), dData.GetLength () );
auto iRes = BIO_flush ( m_pSslBackend );
sphLogDebugv ( FRONT ">> BioFrontWrite (%p) done (%d) %d bytes of %d" NORM, (BIO*)m_pSslBackend, iRes, iSent, dData.GetLength () );
m_iSendTotal += dData.GetLength();
return ( iRes>0 );
}
int ReadFromBackend ( int iNeed, int iSpace, bool ) final
{
assert ( iNeed <= iSpace );
auto dBuf = AllocateBuffer ( iSpace );
int iHaveSpace = dBuf.GetLength();
auto pBuf = dBuf.begin();
int iGotTotal = 0;
while ( iNeed>0 )
{
auto iPending = BIO_pending( m_pSslBackend );
if ( !iPending && BIO_eof ( m_pSslBackend ))
{
sphLogDebugv ( FRONT "~~ BIO_eof on frontend. Bailing" NORM );
return -1;
}
auto iCanRead = Max ( iNeed, Min ( iHaveSpace, iPending ));
sphLogDebugv ( FRONT "~~ BioReadFront %d..%d, can %d, pending %d" NORM,
iNeed, iHaveSpace, iCanRead, iPending );
int iGot = BIO_read ( m_pSslBackend, pBuf, iCanRead );
sphLogDebugv ( FRONT "<< BioReadFront (%p) done %d from %d..%d" NORM,
(BIO *) m_pSslBackend, iGot, iNeed, iHaveSpace );
pBuf += iGot;
iGotTotal += iGot;
iNeed -= iGot;
iHaveSpace -= iGot;
if ( !iGot )
{
sphLogDebugv ( FRONT "<< BioReadFront (%p) breaking on %d" NORM,
(BIO *) m_pSslBackend, iGotTotal );
break;
}
}
m_iReceivedTotal += iGotTotal;
return iGotTotal;
}
public:
explicit AsyncSSBufferedSocket_c ( BIOPtr_c pSslFrontend )
: m_pSslBackend ( std::move ( pSslFrontend ) )
{}
void SetWTimeoutUS ( int64_t iTimeoutUS ) final
{
BIO_ctrl ( m_pSslBackend, BIO_CTRL_DGRAM_SET_SEND_TIMEOUT, (long)iTimeoutUS, nullptr );
}
int64_t GetWTimeoutUS () const final
{
return BIO_ctrl ( m_pSslBackend, BIO_CTRL_DGRAM_GET_SEND_TIMEOUT, 0, nullptr );
}
void SetTimeoutUS ( int64_t iTimeoutUS ) final
{
BIO_ctrl ( m_pSslBackend, BIO_CTRL_DGRAM_SET_RECV_TIMEOUT, (long)iTimeoutUS, nullptr );
}
int64_t GetTimeoutUS () const final
{
return BIO_ctrl ( m_pSslBackend, BIO_CTRL_DGRAM_GET_RECV_TIMEOUT, 0, nullptr );
}
int64_t GetTotalSent () const final
{
return m_iSendTotal;
}
int64_t GetTotalReceived() const final
{
return m_iReceivedTotal;
}
};
bool MakeSecureLayer ( std::unique_ptr<AsyncNetBuffer_c>& pSource )
{
auto pCtx = GetReadySslCtx();
if ( !pCtx )
return false;
BIOPtr_c pFrontEnd ( BIO_new_ssl ( pCtx, 0 ), [pCtx] (BIO * pBio) {
BIO_free_all ( pBio );
} );
SSL * pSSL = nullptr;
BIO_get_ssl ( pFrontEnd, &pSSL );
SSL_set_mode ( pSSL, SSL_MODE_AUTO_RETRY );
BIO_push ( pFrontEnd, BIO_new_coroAsync ( std::move ( pSource ) ) );
pSource = std::make_unique<AsyncSSBufferedSocket_c> ( std::move ( pFrontEnd ) );
return true;
}
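// Illustrative usage sketch (editor's addition; pBuf is a hypothetical name).
// Upgrading an accepted connection to TLS replaces the plain buffer with the
// SSL-wrapping one in place:
//
//	std::unique_ptr<AsyncNetBuffer_c> pBuf = /* plain network buffer */;
//	if ( !MakeSecureLayer ( pBuf ) )
//		return; // no usable SSL context; keep talking plaintext or bail
//	// pBuf now transparently encrypts/decrypts via the BIO chain above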
#else
// these stubs for non-daemon (i.e. for tests)
void SetServerSSLKeys ( const CSphString & , const CSphString & , const CSphString & ) {}
bool CheckWeCanUseSSL ( CSphString * pError )
{
if ( pError )
*pError="daemon built without SSL support";
return false;
}
bool MakeSecureLayer ( std::unique_ptr<AsyncNetBuffer_c> & ) { return false; }
#endif
#endif
| 15,695 | C++ | .cpp | 492 | 29.512195 | 133 | 0.693056 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,892 | indexfiles.cpp | manticoresoftware_manticoresearch/src/indexfiles.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "indexfiles.h"
#include "sphinxstd.h"
#include "fileio.h"
#include "fileutils.h"
#include "sphinxint.h"
#include "tokenizer/tokenizer.h"
static IndexFileExt_t g_dIndexFilesExts[SPH_EXT_TOTAL] =
{
{ SPH_EXT_SPH, ".sph", 1, false, true, "header file" },
{ SPH_EXT_SPA, ".spa", 1, true, true, "attribute values" },
{ SPH_EXT_SPB, ".spb", 50, true, true, "var-length attrs: strings, jsons, mva" },
{ SPH_EXT_SPC, ".spc", 61, true, true, "columnar storage" },
{ SPH_EXT_SPI, ".spi", 1, false, true, "dictionary (aka wordlist)" },
{ SPH_EXT_SPD, ".spd", 1, false, true, "document lists (aka doclists)"},
{ SPH_EXT_SPP, ".spp", 3, false, true, "keyword positions lists (aka hitlists)" },
{ SPH_EXT_SPK, ".spk", 10, true, true, "kill list (aka klist)" },
{ SPH_EXT_SPE, ".spe", 31, false, true, "skip-lists to speed up doc-list filtering" },
{ SPH_EXT_SPM, ".spm", 4, false, true, "dead row map" },
{ SPH_EXT_SPT, ".spt", 53, true, true, "docid lookup table" },
{ SPH_EXT_SPHI, ".sphi", 53, true, true, "secondary index histograms" },
{ SPH_EXT_SPDS, ".spds", 57, true, true, "document storage" },
{ SPH_EXT_SPL, ".spl", 1, true, false, "file lock for the table" },
{ SPH_EXT_SETTINGS, ".settings", 1, true, false, "table runtime settings" },
{ SPH_EXT_SPIDX, ".spidx", 62, true, true, "secondary index" },
{ SPH_EXT_SPJIDX, ".spjidx", 66, true, true, "secondary index for json attributes" },
{ SPH_EXT_SPKNN, ".spknn", 65, true, true, "knn index" }
};
const char* sphGetExt ( ESphExt eExt )
{
if ( eExt>=SPH_EXT_TOTAL )
return "";
return g_dIndexFilesExts[eExt].m_szExt;
}
CSphVector<IndexFileExt_t> sphGetExts()
{
// we may add support for older index versions in the future
CSphVector<IndexFileExt_t> dResult;
for (const auto & dIndexFilesExt : g_dIndexFilesExts)
dResult.Add ( dIndexFilesExt );
return dResult;
}
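// Illustrative usage sketch (editor's addition; sBase is a hypothetical
// path). Enumerating the full set of mandatory files for a table, e.g. for a
// backup tool:
//
//	CSphString sBase = "/var/lib/manticore/mytable/mytable";
//	for ( const IndexFileExt_t & tExt : sphGetExts() )
//		if ( !tExt.m_bOptional )
//			sphInfo ( "need %s%s", sBase.cstr(), tExt.m_szExt );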
//////////////////////////////////////////////////////////////////////////
CSphString IndexFiles_c::FatalMsg ( const char * sMsg )
{
CSphString sFatalMsg;
if ( sMsg )
{
if ( m_sIndexName.IsEmpty () )
sFatalMsg.SetSprintf ( "%s: %s", sMsg, ErrorMsg () );
else
sFatalMsg.SetSprintf ( "%s table '%s': %s", sMsg, m_sIndexName.cstr(), ErrorMsg() );
} else
{
if ( m_sIndexName.IsEmpty () )
sFatalMsg.SetSprintf ( "%s", ErrorMsg () );
else
sFatalMsg.SetSprintf ( "table '%s': %s", m_sIndexName.cstr (), ErrorMsg () );
}
return sFatalMsg;
}
CSphString IndexFiles_c::FullPath ( const char * szExt, const CSphString& sSuffix, const CSphString& sBase )
{
StringBuilder_c sResult;
sResult << (sBase.IsEmpty() ? GetFilebase() : sBase) << sSuffix << szExt;
return (CSphString)sResult;
}
CSphString IndexFiles_c::MakePath ( const char* szSuffix, const CSphString& sBase )
{
StringBuilder_c sResult;
sResult << sBase << szSuffix;
return (CSphString)sResult;
}
CSphString IndexFiles_c::MakePath ( const char * szSuffix )
{
return MakePath ( szSuffix, GetFilebase() );
}
bool IndexFiles_c::HasAllFiles ( const char * sType )
{
for ( const auto & dExt : g_dIndexFilesExts )
{
if ( m_uVersion<dExt.m_uMinVer || dExt.m_bOptional )
continue;
if ( !sphIsReadable ( FullPath ( dExt.m_szExt, sType ) ) )
return false;
}
return true;
}
void IndexFiles_c::Unlink ( const char * szType )
{
for ( const auto &dExt : g_dIndexFilesExts )
{
auto sFile = FullPath ( dExt.m_szExt, szType );
if ( ::unlink ( sFile.cstr() ) && !dExt.m_bOptional )
sphWarning ( "unlink failed (file '%s', error '%s'", sFile.cstr (), strerrorm ( errno ) );
}
}
void IndexFiles_c::UnlinkExisted()
{
for ( const auto & tExt : g_dIndexFilesExts )
{
auto sFile = FullPath ( tExt.m_szExt );
if ( sphIsReadable ( sFile.cstr() ) && ::unlink ( sFile.cstr() ) && !tExt.m_bOptional )
sphWarning ( "unlink failed (file '%s', error '%s'", sFile.cstr (), strerrorm ( errno ) );
}
}
bool IndexFiles_c::TryRename ( const CSphString& sFrom, const CSphString& sTo ) // move files between different bases
{
m_bFatal = false;
bool bRenamed[SPH_EXT_TOTAL] = { false };
bool bAllOk = true;
for ( int i = 0; i<SPH_EXT_TOTAL; i++ )
{
const auto & dExt = g_dIndexFilesExts[i];
if ( m_uVersion<dExt.m_uMinVer || !dExt.m_bCopy )
continue;
auto sFullFrom = FullPath ( dExt.m_szExt, "", sFrom );
auto sFullTo = FullPath ( dExt.m_szExt, "", sTo );
#if _WIN32
::unlink ( sFullTo.cstr() );
sphLogDebug ( "%s unlinked", sTo.cstr() );
#endif
if ( sph::rename ( sFullFrom.cstr (), sFullTo.cstr () ) )
{
			// a missing optional file is no reason to fail.
if ( dExt.m_bOptional )
continue;
m_sLastError.SetSprintf ( "rename %s to %s failed: %s", sFullFrom.cstr (), sFullTo.cstr (), strerrorm ( errno ) );
bAllOk = false;
break;
}
bRenamed[i] = true;
}
if ( bAllOk )
return true;
for ( int i = 0; i<SPH_EXT_TOTAL; ++i )
{
if ( !bRenamed[i] )
continue;
const auto & dExt = g_dIndexFilesExts[i];
auto sFullFrom = FullPath ( dExt.m_szExt, "", sTo );
auto sFullTo = FullPath ( dExt.m_szExt, "", sFrom );
if ( sph::rename ( sFullFrom.cstr (), sFullTo.cstr () ) )
{
sphLogDebug ( "rollback failure when renaming %s to %s", sFullFrom.cstr (), sFullTo.cstr () );
m_bFatal = true;
}
}
return false;
}
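// Editor's note on the pattern above: TryRename() is a two-phase rename with
// rollback. Phase one renames forward and records each success in bRenamed[];
// on the first hard failure, phase two walks the recorded successes and
// renames them back. Only a failed rollback sets m_bFatal, since the file set
// is then left inconsistent on disk.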
bool IndexFiles_c::RenameLock ( const CSphString& sTo, int &iLockFD )
{
	if ( iLockFD<0 ) // no lock, no renaming needed
return true;
m_bFatal = false;
auto sFullFrom = FullPath ( sphGetExt(SPH_EXT_SPL) );
auto sFullTo = FullPath ( sphGetExt(SPH_EXT_SPL), "", sTo );
#if !_WIN32
if ( !sph::rename ( sFullFrom.cstr (), sFullTo.cstr () ) )
return true;
m_sLastError.SetSprintf ("failed to rename lock %s to %s, fd=%d, error %s (%d); ", sFullFrom.cstr(), sFullTo.cstr(), iLockFD, strerrorm ( errno ), errno );
// that is renaming of only 1 file failed; no need to rollback.
m_bFatal = true;
return false;
#else
// on Windows - no direct rename. Lock new instead, release previous.
int iNewLock=-1;
if ( !RawFileLock ( sFullTo, iNewLock, m_sLastError ) )
return false;
auto iOldLock = iLockFD;
iLockFD = iNewLock;
RawFileUnLock ( sFullFrom, iOldLock );
return true;
#endif
}
// move from backup to path using full (long) paths; fail is fatal
bool IndexFiles_c::Rename ( const CSphString& sFrom, const CSphString& sTo )
{
for ( const auto &dExt : g_dIndexFilesExts )
{
auto sFullFrom = FullPath ( dExt.m_szExt, "", sFrom );
auto sFullTo = FullPath ( dExt.m_szExt, "", sTo );
if ( !sphIsReadable ( sFullFrom ) )
{
::unlink ( sFullTo.cstr () );
continue;
}
#if _WIN32
::unlink ( sFullTo.cstr() );
sphLogDebug ( "%s unlinked", sFullTo.cstr() );
#endif
if ( sph::rename ( sFullFrom.cstr (), sFullTo.cstr () ) )
{
sphLogDebug ( "rename %s to %s failed: %s", sFullFrom.cstr (), sFullTo.cstr (), strerrorm ( errno ) );
return false;
}
}
return true;
}
// move everything that is intended for copying.
bool IndexFiles_c::TryRenameSuffix ( const CSphString& sFromSuffix, const CSphString& sToSuffix )
{
return TryRename ( FullPath ( "", sFromSuffix ), FullPath ( "", sToSuffix ) );
}
bool IndexFiles_c::TryRenameBase ( const CSphString& sToBase ) // move files to different base
{
return TryRename ( FullPath ( "" ), sToBase );
}
bool IndexFiles_c::RelocateToNew ( const CSphString& sNewBase )
{
return Rename ( FullPath ( "", "", sNewBase ), FullPath ( "", ".new" ) );
}
bool IndexFiles_c::RenameSuffix ( const CSphString& sFrom, const CSphString& sTo )
{
return Rename ( FullPath ( "", sFrom ), FullPath ( "", sTo ) );
}
bool IndexFiles_c::CheckHeader ( const char * sType )
{
auto sPath = FullPath ( sphGetExt(SPH_EXT_SPH), sType );
BYTE dBuffer[8];
CSphAutoreader rdHeader ( dBuffer, sizeof ( dBuffer ) );
if ( !rdHeader.Open ( sPath, m_sLastError ) )
return false;
// check magic header
auto uMagic = rdHeader.GetDword();
if ( dBuffer[0] == '{' ) // that is new style json header, no need to check further...
return true;
const char* sMsg = CheckFmtMagic ( uMagic );
if ( sMsg )
{
m_sLastError.SetSprintf ( sMsg, sPath.cstr() );
return false;
}
// get version
DWORD uVersion = rdHeader.GetDword ();
if ( uVersion==0 || uVersion>INDEX_FORMAT_VERSION )
{
m_sLastError.SetSprintf ( "%s is v.%u, binary is v.%u", sPath.cstr(), uVersion, INDEX_FORMAT_VERSION );
return false;
}
m_uVersion = uVersion;
return true;
}
bool IndexFiles_c::ReadKlistTargets ( StrVec_t & dTargets, const char * szType )
{
CSphString sPath = FullPath ( sphGetExt(SPH_EXT_SPK), szType );
if ( !sphIsReadable(sPath) )
return true;
CSphString sError;
CSphAutoreader tReader;
if ( !tReader.Open ( sPath, sError ) )
return false;
DWORD nIndexes = tReader.GetDword();
dTargets.Resize ( nIndexes );
for ( auto & i : dTargets )
{
i = tReader.GetString();
tReader.GetDword(); // skip flags
}
return true;
}
| 9,310 | C++ | .cpp | 271 | 32.125461 | 156 | 0.671004 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,893 | aggrexpr.cpp | manticoresoftware_manticoresearch/src/aggrexpr.cpp |
//
// Copyright (c) 2017-2023, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include <cfloat>
#include <climits>
#include <math.h>
#include "datetime.h"
#include "exprtraits.h"
#include "sphinxjsonquery.h"
#include "sphinxint.h"
#include "aggrexpr.h"
// the aggr range implementation
template < bool FLOAT >
class AggrRangeExpr_T : public Expr_ArgVsSet_T<int>
{
AggrRangeSetting_t m_tRanges;
public:
AggrRangeExpr_T ( ISphExpr * pAttr, const AggrRangeSetting_t & tRanges )
: Expr_ArgVsSet_T ( pAttr )
, m_tRanges ( tRanges )
{
}
int IntEval ( const CSphMatch & tMatch ) const final
{
int iBucket = GetBucket ( tMatch );
return iBucket;
}
protected:
int GetBucket ( const CSphMatch & tMatch ) const
{
if_const ( FLOAT )
{
double fVal = m_pArg->Eval ( tMatch );
if ( m_tRanges.m_bOpenLeft && fVal<m_tRanges.First().m_fTo )
return m_tRanges.First().m_iIdx;
if ( m_tRanges.m_bOpenRight && fVal>=m_tRanges.Last().m_fFrom )
return m_tRanges.Last().m_iIdx;
if ( !m_tRanges.m_bOpenLeft && fVal<m_tRanges.First().m_fFrom )
return m_tRanges.GetLength();
if ( !m_tRanges.m_bOpenRight && fVal>=m_tRanges.Last().m_fTo )
return m_tRanges.GetLength();
int iItem = m_tRanges.GetFirst ([&](const RangeSetting_t& tRange) { return (tRange.m_fFrom<=fVal && fVal<tRange.m_fTo); });
if ( iItem==-1 )
return m_tRanges.GetLength();
return m_tRanges[iItem].m_iIdx;
} else
{
int64_t iVal = m_pArg->Int64Eval ( tMatch );
if ( m_tRanges.m_bOpenLeft && iVal<m_tRanges.First().m_iTo )
return m_tRanges.First().m_iIdx;
if ( m_tRanges.m_bOpenRight && iVal>=m_tRanges.Last().m_iFrom )
return m_tRanges.Last().m_iIdx;
if ( !m_tRanges.m_bOpenLeft && iVal<m_tRanges.First().m_iFrom )
return m_tRanges.GetLength();
if ( !m_tRanges.m_bOpenRight && iVal>=m_tRanges.Last().m_iTo )
return m_tRanges.GetLength();
int iItem = m_tRanges.GetFirst ( [&] ( const RangeSetting_t & tRange ) { return ( tRange.m_iFrom<=iVal && iVal<tRange.m_iTo ); } );
if ( iItem==-1 )
return m_tRanges.GetLength();
return m_tRanges[iItem].m_iIdx;
}
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("AggrRangeExpr_T");
return CALC_DEP_HASHES();
}
ISphExpr * Clone() const final
{
return new AggrRangeExpr_T ( *this );
}
private:
AggrRangeExpr_T ( const AggrRangeExpr_T & rhs )
: Expr_ArgVsSet_T ( rhs )
, m_tRanges ( rhs.m_tRanges )
{
}
};
ISphExpr * CreateExprRange ( ISphExpr * pAttr, const AggrRangeSetting_t & tRanges )
{
if ( tRanges.m_bFloat )
return new AggrRangeExpr_T<true> ( pAttr, tRanges );
else
return new AggrRangeExpr_T<false> ( pAttr, tRanges );
}
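// Worked example (editor's addition): with integer ranges {from=0,to=10} and
// {from=10,to=20}, a value of 10 falls into the second bucket ('from' is
// inclusive, 'to' is exclusive). A value of 25 matches no range, so
// GetBucket() returns m_tRanges.GetLength(), i.e. the out-of-range bucket;
// with m_bOpenRight set it would land in the last bucket instead.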
static void DumpRange ( const RangeSetting_t & tRange, bool bFloat, StringBuilder_c & sRes )
{
if ( bFloat )
sRes.Appendf ( ", {range_from=%f, range_to=%f}", tRange.m_fFrom, tRange.m_fTo );
else
sRes.Appendf ( ", {range_from=" INT64_FMT ", range_to=" INT64_FMT "}", tRange.m_iFrom, tRange.m_iTo );
}
static void DumpRangeOpenLeft ( const RangeSetting_t & tRange, bool bFloat, StringBuilder_c & sRes )
{
if ( bFloat )
sRes.Appendf ( ", {range_to=%f}", tRange.m_fTo );
else
sRes.Appendf ( ", {range_to=" INT64_FMT "}", tRange.m_iTo );
}
static void DumpRangeOpenRight ( const RangeSetting_t & tRange, bool bFloat, StringBuilder_c & sRes )
{
if ( bFloat )
sRes.Appendf ( ", {range_from=%f}", tRange.m_fFrom );
else
sRes.Appendf ( ", {range_from=" INT64_FMT "}", tRange.m_iFrom );
}
static void DumpRange ( int iItem, const AggrRangeSetting_t & tRanges, StringBuilder_c & sRes )
{
const bool bFloat = tRanges.m_bFloat;
const RangeSetting_t & tRange = tRanges[iItem];
if ( iItem==0 && tRanges.m_bOpenLeft )
DumpRangeOpenLeft ( tRange, bFloat, sRes );
else if ( iItem==tRanges.GetLength()-1 && tRanges.m_bOpenRight )
		DumpRangeOpenRight ( tRange, bFloat, sRes );
else
DumpRange ( tRange, bFloat, sRes );
}
static CSphString DumpAggrRange ( const CSphString & sCol, const AggrRangeSetting_t & tRanges )
{
assert ( tRanges.GetLength() );
StringBuilder_c sRes;
sRes.Appendf ( "range(%s", sCol.cstr() );
ARRAY_FOREACH ( i, tRanges )
DumpRange ( i, tRanges, sRes );
sRes += ")";
return CSphString ( sRes );
}
static CSphString DumpAggrRange ( const CSphString & sCol, const AggrDateRangeSetting_t & tRanges )
{
assert ( tRanges.GetLength() );
StringBuilder_c sRes;
sRes.Appendf ( "date_range(%s", sCol.cstr() );
	for ( const DateRangeSetting_t & tRange : tRanges )
{
if ( !tRange.m_sFrom.IsEmpty() && !tRange.m_sTo.IsEmpty() )
sRes.Appendf ( ", {range_from='%s', range_to='%s'}", tRange.m_sFrom.cstr(), tRange.m_sTo.cstr() );
else if ( tRange.m_sFrom.IsEmpty() )
sRes.Appendf ( ", {range_to='%s'}", tRange.m_sTo.cstr() );
else
sRes.Appendf ( ", {range_from='%s'}", tRange.m_sFrom.cstr() );
}
sRes += ")";
return CSphString ( sRes );
}
bool ParseAggrRange ( const VecTraits_T< VecTraits_T < CSphNamedVariant > > & dSrcRanges, bool bDate, int iNow, AggrRangeSetting_t & tRanges, CSphString & sError )
{
if ( dSrcRanges.IsEmpty() )
{
sError = "at least 1 range expected";
return false;
}
ARRAY_FOREACH ( iItem, dSrcRanges )
{
const auto & dItem = dSrcRanges[iItem];
if ( dItem.IsEmpty() )
{
sError.SetSprintf ( "empty range %d", iItem );
return false;
}
bool bHasFrom = false;
bool bHasTo = false;
bool bFloatFrom = false;
bool bFloatTo = false;
auto & tRange = tRanges.Add();
tRange.m_iIdx = iItem;
ARRAY_FOREACH ( iVal, dItem )
{
const auto & tVal = dItem[iVal];
if ( tVal.m_sKey=="range_from" )
{
if ( tVal.m_eType==VariantType_e::BIGINT )
tRange.m_iFrom = tVal.m_iValue;
else if ( tVal.m_eType==VariantType_e::FLOAT )
{
tRange.m_fFrom = tVal.m_fValue;
bFloatFrom = true;
} else if ( tVal.m_eType==VariantType_e::STRING )
{
time_t tFrom = 0;
if ( !ParseDateMath ( tVal.m_sValue, iNow, tFrom ) )
{
sError.SetSprintf ( "date_range invalid from value '%s'", tVal.m_sValue.cstr() );
return false;
}
tRange.m_iFrom = tFrom;
} else
{
sError.SetSprintf ( "%s %d invalid value type %d", ( bDate ? "date_range" : "range" ), iItem, (int)tVal.m_eType );
return false;
}
bHasFrom = true;
} else if ( tVal.m_sKey=="range_to" )
{
if ( tVal.m_eType==VariantType_e::BIGINT )
tRange.m_iTo = tVal.m_iValue;
else if ( tVal.m_eType==VariantType_e::FLOAT )
{
tRange.m_fTo = tVal.m_fValue;
bFloatTo = true;
} else if ( tVal.m_eType==VariantType_e::STRING )
{
time_t tTo = 0;
if ( !ParseDateMath ( tVal.m_sValue, iNow, tTo ) )
{
sError.SetSprintf ( "date_range invalid to value '%s'", tVal.m_sValue.cstr() );
return false;
}
tRange.m_iTo = tTo;
} else
{
sError.SetSprintf ( "%s %d invalid value type %d", ( bDate ? "date_range" : "range" ), iItem, (int)tVal.m_eType );
return false;
}
bHasTo = true;
}
}
if ( !bHasFrom && !bHasTo )
{
sError.SetSprintf ( "empty %s %d", ( bDate ? "date_range" : "range" ), iItem );
return false;
}
if ( !bHasFrom )
{
tRanges.m_bOpenLeft = true;
if ( bFloatFrom || bFloatTo || tRanges.m_bFloat )
tRange.m_fFrom = -FLT_MAX;
else
tRange.m_iFrom = INT64_MIN;
}
if ( !bHasTo )
{
tRanges.m_bOpenRight = true;
if ( bFloatFrom || bFloatTo || tRanges.m_bFloat )
tRange.m_fTo = FLT_MAX;
else
tRange.m_iTo = INT64_MAX;
}
// convert both values to float
if ( bFloatFrom^bFloatTo )
{
if ( bFloatFrom )
tRange.m_fTo = tRange.m_iTo;
else
tRange.m_fFrom = tRange.m_iFrom;
} else if ( tRanges.m_bFloat && !( bFloatFrom && bFloatTo ) )
{
tRange.m_fTo = tRange.m_iTo;
tRange.m_fFrom = tRange.m_iFrom;
}
		// convert all previous values into floats
if ( ( bFloatFrom || bFloatTo ) && !tRanges.m_bFloat )
{
if ( tRanges.GetLength()>1 )
{
tRanges.Slice( 0, tRanges.GetLength()-1 ).for_each ( [] ( auto & tRange )
{
tRange.m_fFrom = tRange.m_iFrom;
tRange.m_fTo = tRange.m_iTo;
} );
if ( tRanges.m_bOpenLeft )
tRanges[0].m_fFrom = -FLT_MAX;
}
tRanges.m_bFloat = true;
}
if ( tRanges.m_bFloat )
tRanges.Sort ( ::bind ( &RangeSetting_t::m_fFrom ) );
else
tRanges.Sort ( ::bind ( &RangeSetting_t::m_iFrom ) );
}
return true;
}
CSphString GetAggrName ( int iItem, const CSphString & sCol )
{
CSphString sName;
sName.SetSprintf ( "aggs_%d_%s", iItem, sCol.cstr() );
return sName;
}
static void FormatKeyFloat ( const RangeSetting_t & tRange, bool bHasFrom, bool bHasTo, RangeKeyDesc_t & tDesc )
{
assert ( bHasFrom || bHasTo );
if ( bHasFrom )
tDesc.m_sFrom.SetSprintf ( "%f", tRange.m_fFrom );
if ( bHasTo )
tDesc.m_sTo.SetSprintf ( "%f", tRange.m_fTo );
if ( bHasFrom && bHasTo )
tDesc.m_sKey.SetSprintf ( "%f-%f", tRange.m_fFrom, tRange.m_fTo );
else if ( bHasTo )
tDesc.m_sKey.SetSprintf ( "*-%f", tRange.m_fTo );
else
tDesc.m_sKey.SetSprintf ( "%f-*", tRange.m_fFrom );
}
static void FormatKeyInt ( const RangeSetting_t & tRange, bool bHasFrom, bool bHasTo, RangeKeyDesc_t & tDesc )
{
assert ( bHasFrom || bHasTo );
if ( bHasFrom )
tDesc.m_sFrom.SetSprintf ( INT64_FMT, tRange.m_iFrom );
if ( bHasTo )
tDesc.m_sTo.SetSprintf ( INT64_FMT, tRange.m_iTo );
if ( bHasFrom && bHasTo )
tDesc.m_sKey.SetSprintf ( INT64_FMT "-" INT64_FMT, tRange.m_iFrom, tRange.m_iTo );
else if ( bHasTo )
tDesc.m_sKey.SetSprintf ( "*-" INT64_FMT, tRange.m_iTo );
else
tDesc.m_sKey.SetSprintf ( INT64_FMT "-*", tRange.m_iFrom );
}
static void FormatDate ( const CSphString & sVal, int iNow, CSphString & sRes )
{
time_t tSrcDate;
Verify ( ParseDateMath ( sVal, iNow, tSrcDate ) );
FormatDate ( tSrcDate, sRes );
}
static const char * g_sCompatDateFormat = "%Y-%m-%dT%H:%M:%S"; // YYYY-mm-dd'T'HH:mm:ss
static void FormatDate ( time_t tDate, char * sBuf, int iSize )
{
std::tm tDstDate;
gmtime_r ( &tDate, &tDstDate );
Verify ( strftime ( sBuf, iSize, g_sCompatDateFormat, &tDstDate )>0 );
}
void FormatDate ( time_t tDate, CSphString & sRes )
{
char sBuf[128];
FormatDate ( tDate, sBuf, sizeof(sBuf)-1 );
sRes = sBuf;
}
void FormatDate ( time_t tDate, StringBuilder_c & sRes )
{
char sBuf[128];
FormatDate ( tDate, sBuf, sizeof(sBuf)-1 );
sRes.Appendf ( "%s", sBuf );
}
static void FormatKeyDate ( const DateRangeSetting_t & tRange, int iNow, RangeKeyDesc_t & tDesc )
{
CSphString sFrom;
CSphString sTo;
const bool bHasFrom = !tRange.m_sFrom.IsEmpty();
const bool bHasTo = !tRange.m_sTo.IsEmpty();
assert ( bHasFrom || bHasTo );
assert ( iNow>0 );
if ( bHasFrom )
{
FormatDate ( tRange.m_sFrom, iNow, sFrom );
tDesc.m_sFrom.SetSprintf ( "%s", sFrom.cstr() );
}
if ( bHasTo )
{
FormatDate ( tRange.m_sTo, iNow, sTo );
tDesc.m_sTo.SetSprintf ( "%s", sTo.cstr() );
}
if ( bHasFrom && bHasTo )
tDesc.m_sKey.SetSprintf ( "%s-%s", sFrom.cstr(), sTo.cstr() );
else if ( bHasTo )
tDesc.m_sKey.SetSprintf ( "*-%s", sTo.cstr() );
else
tDesc.m_sKey.SetSprintf ( "%s-*", sFrom.cstr() );
}
void GetRangeKeyNames ( const AggrRangeSetting_t & tRanges, RangeNameHash_t & hRangeNames )
{
if ( tRanges.GetLength()==1 && tRanges.m_bOpenLeft && tRanges.m_bOpenRight )
{
auto & tDesc = hRangeNames.AddUnique ( 0 );
tDesc.m_sKey = "*-*";
return;
}
ARRAY_FOREACH ( i, tRanges )
{
const auto & tSrc = tRanges[i];
auto & tDesc = hRangeNames.AddUnique ( i );
bool bHasFrom = true;
bool bHasTo = true;
if ( i==0 && tRanges.m_bOpenLeft )
bHasFrom = false;
else if ( i==tRanges.GetLength()-1 && tRanges.m_bOpenRight )
bHasTo = false;
if ( tRanges.m_bFloat )
FormatKeyFloat ( tSrc, bHasFrom, bHasTo, tDesc );
else
FormatKeyInt ( tSrc, bHasFrom, bHasTo, tDesc );
}
}
void GetRangeKeyNames ( const AggrDateRangeSetting_t & tRanges, int iNow, RangeNameHash_t & hRangeNames )
{
ARRAY_FOREACH ( i, tRanges )
{
const auto & tSrc = tRanges[i];
auto & tDesc = hRangeNames.AddUnique ( i );
FormatKeyDate ( tSrc, iNow, tDesc );
}
}
static CSphString DumpAggrHist ( const CSphString & sCol, const AggrHistSetting_t & tHist )
{
StringBuilder_c sRes;
sRes.Appendf ( "histogram(%s, {", sCol.cstr() );
if ( tHist.m_bFloat )
sRes.Appendf ( "hist_interval=%f, hist_offset=%f", std::get<float>( tHist.m_tInterval ), std::get<float> ( tHist.m_tOffset ) );
else
sRes.Appendf ( "hist_interval=" INT64_FMT ", hist_offset=" INT64_FMT, std::get<int64_t>( tHist.m_tInterval ), std::get<int64_t>( tHist.m_tOffset ) );
sRes += "})";
return CSphString ( sRes );
}
static CSphString DumpAggrHist ( const CSphString & sCol, const AggrDateHistSetting_t & tHist )
{
StringBuilder_c sRes;
sRes.Appendf ( "date_histogram(%s, {", sCol.cstr() );
sRes.Appendf ( "calendar_interval='%s'", tHist.m_sInterval.cstr() );
sRes += "})";
return CSphString ( sRes );
}
CSphString DumpAggr ( const CSphString & sCol, const AggrSettings_t & tAggr )
{
switch ( tAggr.m_eAggrFunc )
{
case Aggr_e::RANGE: return DumpAggrRange ( sCol, tAggr.m_tRange );
case Aggr_e::DATE_RANGE: return DumpAggrRange ( sCol, tAggr.m_tDateRange );
case Aggr_e::HISTOGRAM: return DumpAggrHist ( sCol, tAggr.m_tHist );
case Aggr_e::DATE_HISTOGRAM: return DumpAggrHist ( sCol, tAggr.m_tDateHist );
default: return sCol;
}
}
static void ConvertIntoFloat ( AggrBound_t & tVal )
{
if ( std::holds_alternative<int64_t> ( tVal ) )
tVal = (float)std::get<int64_t> ( tVal );
}
void FixFloat ( AggrHistSetting_t & tHist )
{
if ( tHist.m_tInterval.index()!=tHist.m_tOffset.index() )
{
ConvertIntoFloat ( tHist.m_tInterval );
ConvertIntoFloat ( tHist.m_tOffset );
tHist.m_bFloat = true;
} else
{
tHist.m_bFloat = std::holds_alternative<float> ( tHist.m_tInterval );
}
}
static void SetValue ( const CSphNamedVariant & tPair, AggrBound_t & tVal )
{
switch ( tPair.m_eType )
{
case VariantType_e::BIGINT: tVal = tPair.m_iValue; return;
case VariantType_e::FLOAT: tVal = tPair.m_fValue; return;
default: assert ( 0 && "internal error: unhandled aggregate value" ); return;
}
}
bool ParseAggrHistogram ( const VecTraits_T < CSphNamedVariant > & dVariants, AggrHistSetting_t & tHist, CSphString & sError )
{
for ( const auto & tPair : dVariants )
{
if ( tPair.m_eType!=VariantType_e::BIGINT && tPair.m_eType!=VariantType_e::FLOAT )
{
sError.SetSprintf ( "invalid value '%s' type %d", tPair.m_sKey.cstr(), (int)tPair.m_eType );
return false;
}
if ( tPair.m_sKey=="hist_interval" )
SetValue ( tPair, tHist.m_tInterval );
else if ( tPair.m_sKey=="hist_offset" )
SetValue ( tPair, tHist.m_tOffset );
else
{
sError.SetSprintf ( "unknow value '%s'", tPair.m_sKey.cstr() );
return false;
}
}
FixFloat ( tHist );
return true;
}
bool ParseAggrDateHistogram ( const VecTraits_T < CSphNamedVariant > & dVariants, AggrDateHistSetting_t & tHist, CSphString & sError )
{
for ( const auto & tPair : dVariants )
{
if ( tPair.m_eType!=VariantType_e::STRING )
{
sError.SetSprintf ( "invalid value '%s' type %d", tPair.m_sKey.cstr(), (int)tPair.m_eType );
return false;
}
if ( tPair.m_sKey=="calendar_interval" )
tHist.m_sInterval = tPair.m_sValue;
else
{
sError.SetSprintf ( "unknow value '%s'", tPair.m_sKey.cstr() );
return false;
}
}
if ( tHist.m_sInterval.IsEmpty() )
{
sError.SetSprintf ( "calendar_interval missed" );
return false;
}
DateUnit_e eUnit = ParseDateInterval ( tHist.m_sInterval, sError );
if ( eUnit==DateUnit_e::total_units )
return false;
return true;
}
// the aggr histogram implementation
template < bool FLOAT >
class AggrHistExpr_T : public Expr_ArgVsSet_T<int>
{
AggrHistSetting_t m_tHist;
public:
AggrHistExpr_T ( ISphExpr * pAttr, const AggrHistSetting_t & tHist )
: Expr_ArgVsSet_T ( pAttr )
, m_tHist ( tHist )
{
assert ( ( m_tHist.m_bFloat && std::get<float> ( m_tHist.m_tInterval )>0.0f ) || ( !m_tHist.m_bFloat && std::get<int64_t> ( m_tHist.m_tInterval )>0 ) );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
int iBucket = GetBucket ( tMatch );
return iBucket;
}
protected:
int GetBucket ( const CSphMatch & tMatch ) const
{
if_const ( FLOAT )
{
float fInterval = std::get<float> ( m_tHist.m_tInterval );
float fOffset = std::get<float> ( m_tHist.m_tOffset );
float fVal = m_pArg->Eval ( tMatch );
float fBucketNrm = floor ( ( fVal - fOffset ) / fInterval );
int iBucket = fBucketNrm * fInterval + fOffset;
return iBucket;
} else
{
int64_t iInterval = std::get<int64_t> ( m_tHist.m_tInterval );
int64_t iOffset = std::get<int64_t> ( m_tHist.m_tOffset );
int64_t iVal = m_pArg->Int64Eval ( tMatch );
double fBucketNrm = floor ( double ( iVal - iOffset ) / iInterval );
int iBucket = fBucketNrm * iInterval + iOffset;
return iBucket;
}
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("AggrHistExpr_T");
return CALC_DEP_HASHES();
}
ISphExpr * Clone() const final
{
return new AggrHistExpr_T ( *this );
}
private:
AggrHistExpr_T ( const AggrHistExpr_T & rhs )
: Expr_ArgVsSet_T ( rhs )
, m_tHist ( rhs.m_tHist )
{
}
};
ISphExpr * CreateExprHistogram ( ISphExpr * pAttr, const AggrHistSetting_t & tHist )
{
if ( tHist.m_bFloat )
return new AggrHistExpr_T<true> ( pAttr, tHist );
else
return new AggrHistExpr_T<false> ( pAttr, tHist );
}
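// Worked example (editor's addition): the histogram bucket key is
//	floor ( ( value - offset ) / interval ) * interval + offset.
// With interval=10, offset=5: value 27 -> floor(22/10)=2 -> 2*10+5 = 25,
// so values 25..34 share the key 25. Note the key is truncated to int,
// which silently narrows very large int64 buckets.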
// the aggr date histogram implementation
class AggrDateHistExpr_c : public Expr_ArgVsSet_T<int>
{
AggrDateHistSetting_t m_tHist;
DateUnit_e m_eUnit = DateUnit_e::total_units;
public:
AggrDateHistExpr_c ( ISphExpr * pAttr, const AggrDateHistSetting_t & tHist )
: Expr_ArgVsSet_T ( pAttr )
, m_tHist ( tHist )
{
CSphString sError;
m_eUnit = ParseDateInterval ( tHist.m_sInterval, sError );
assert ( m_eUnit!=DateUnit_e::total_units );
}
int IntEval ( const CSphMatch & tMatch ) const final
{
int iBucket = GetBucket ( tMatch );
return iBucket;
}
protected:
int GetBucket ( const CSphMatch & tMatch ) const
{
time_t iVal = m_pArg->IntEval ( tMatch );
RoundDate ( m_eUnit, iVal );
return iVal;
}
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final
{
EXPR_CLASS_NAME("AggrDateHistExpr_c");
return CALC_DEP_HASHES();
}
ISphExpr * Clone() const final
{
return new AggrDateHistExpr_c ( *this );
}
private:
AggrDateHistExpr_c ( const AggrDateHistExpr_c & rhs )
: Expr_ArgVsSet_T ( rhs )
, m_tHist ( rhs.m_tHist )
, m_eUnit ( rhs.m_eUnit )
{
}
};
ISphExpr * CreateExprDateHistogram ( ISphExpr * pAttr, const AggrDateHistSetting_t & tHist )
{
return new AggrDateHistExpr_c ( pAttr, tHist );
}
| 19,165 | C++ | .cpp | 609 | 28.643678 | 163 | 0.673785 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,894 | sphinxsearch.cpp | manticoresoftware_manticoresearch/src/sphinxsearch.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxsearch.h"
#include "searchnode.h"
#include "querycontext.h"
#include "sphinxplugin.h"
#include "sphinxqcache.h"
#include "attribute.h"
#include "conversion.h"
#include "secondaryindex.h"
#include "dict/dict_entry.h"
#include "client_task_info.h"
#include <math.h>
bool operator < ( const SkiplistEntry_t & a, RowID_t b ) { return a.m_tBaseRowIDPlus1<b; }
bool operator == ( const SkiplistEntry_t & a, RowID_t b ) { return a.m_tBaseRowIDPlus1==b; }
bool operator < ( RowID_t a, const SkiplistEntry_t & b ) { return a<b.m_tBaseRowIDPlus1; }
void SkipData_t::Read ( const BYTE * pSkips, const DictEntry_t & tRes, int iDocs, int iSkipBlockSize )
{
const BYTE * pSkip = pSkips + tRes.m_iSkiplistOffset;
m_dSkiplist.Add();
m_dSkiplist.Last().m_tBaseRowIDPlus1 = 0;
m_dSkiplist.Last().m_iOffset = tRes.m_iDoclistOffset;
m_dSkiplist.Last().m_iBaseHitlistPos = 0;
for ( int i=1; i < iDocs/iSkipBlockSize; i++ )
{
SkiplistEntry_t & t = m_dSkiplist.Add();
SkiplistEntry_t & p = m_dSkiplist [ m_dSkiplist.GetLength()-2 ];
t.m_tBaseRowIDPlus1 = p.m_tBaseRowIDPlus1 + iSkipBlockSize + UnzipIntBE(pSkip);
t.m_iOffset = p.m_iOffset + 4*iSkipBlockSize + UnzipOffsetBE(pSkip);
t.m_iBaseHitlistPos = p.m_iBaseHitlistPos + UnzipOffsetBE(pSkip);
}
}
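// Worked example (editor's addition): each on-disk skiplist entry stores
// deltas, so entry i is reconstructed from entry i-1 as
//	rowid[i]   = rowid[i-1]   + iSkipBlockSize   + delta_rowid
//	doclist[i] = doclist[i-1] + 4*iSkipBlockSize + delta_doclist
//	hitlist[i] = hitlist[i-1] + delta_hitlist
// with a synthetic zero-based entry at index 0; hence iDocs/iSkipBlockSize-1
// decoded entries for a keyword matching iDocs documents.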
//////////////////////////////////////////////////////////////////////////
#define SPH_TREE_DUMP 0
#define SPH_BM25_SCALE 1000
RowID_t ISphQword::AdvanceTo ( RowID_t tRowID )
{
// this is sub-optimal, faster versions of AdvanceTo should be implemented in descendants
HintRowID ( tRowID );
RowID_t tFoundRowID = INVALID_ROWID;
do
{
tFoundRowID = GetNextDoc().m_tRowID;
}
while ( tFoundRowID < tRowID );
return tFoundRowID;
}
void ISphQword::CollectHitMask()
{
if ( m_bAllFieldsKnown )
return;
SeekHitlist ( m_iHitlistPos );
for ( Hitpos_t uHit = GetNextHit(); uHit!=EMPTY_HIT; uHit = GetNextHit() )
m_dQwordFields.Set ( HITMAN::GetField ( uHit ) );
m_bAllFieldsKnown = true;
}
void ISphQword::Reset ()
{
m_iDocs = 0;
m_iHits = 0;
m_dQwordFields.UnsetAll();
m_bAllFieldsKnown = false;
m_uMatchHits = 0;
m_iHitlistPos = 0;
}
int ISphQword::GetAtomPos() const
{
return m_iAtomPos;
}
/// per-document zone information (span start/end positions)
struct ZoneInfo_t
{
RowID_t m_tRowID;
ZoneHits_t * m_pHits;
};
// FindSpan vector operators
static bool operator < ( const ZoneInfo_t & tZone, RowID_t tRowID )
{
return tZone.m_tRowID<tRowID;
}
static bool operator == ( const ZoneInfo_t & tZone, RowID_t tRowID )
{
return tZone.m_tRowID==tRowID;
}
static bool operator < ( RowID_t tRowID, const ZoneInfo_t & tZone )
{
return tRowID<tZone.m_tRowID;
}
//////////////////////////////////////////////////////////////////////////
// RANKER
//////////////////////////////////////////////////////////////////////////
typedef CSphFixedVector < CSphVector < ZoneInfo_t > > ZoneVVector_t;
struct RankerSettings_t
{
bool m_bRowidLimits = false;
bool m_bSkipQCache = false;
bool m_bCollectHits = true;
RowIdBoundaries_t m_tBoundaries;
};
/// ranker interface
/// ranker folds incoming hitstream into simple match chunks, and computes relevance rank
class ExtRanker_c : public ISphRanker, public ISphZoneCheck
{
public:
ExtRanker_c ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings, bool bUseBM25 );
~ExtRanker_c() override;
void Reset ( const ISphQwordSetup & tSetup ) override;
CSphMatch * GetMatchesBuffer () override { return m_dMatches; }
int GetQwords ( ExtQwordsHash_t & hQwords ) { return m_pRoot ? m_pRoot->GetQwords ( hQwords ) : -1; }
virtual void SetQwordsIDF ( const ExtQwordsHash_t & hQwords );
virtual void SetTermDupes ( const ExtQwordsHash_t & , int ) {}
virtual bool InitState ( const CSphQueryContext &, CSphString & ) { return true; }
void FinalizeCache ( const ISphSchema & tSorterSchema ) override;
NodeEstimate_t Estimate ( int64_t iTotalDocs ) const override;
public:
// FIXME? hide and friend?
SphZoneHit_e IsInZone ( int iZone, const ExtHit_t * pHit, int * pLastSpan ) override;
virtual const CSphIndex * GetIndex() { return m_pIndex; }
const CSphQueryContext * GetCtx() const { return m_pCtx; }
public:
CSphMatch m_dMatches[MAX_BLOCK_DOCS]; ///< exposed for caller
DWORD m_uPayloadMask = 0; ///< exposed for ranker state functors
int m_iQwords = 0; ///< exposed for ranker state functors
int m_iMaxQpos = 0; ///< max in-query pos among all keywords, including dupes; for ranker state functors
protected:
std::unique_ptr<ExtNode_i> m_pRoot;
	ExtNode_i * m_pOriginalRoot = nullptr; ///< set if we replace the root with a pseudo-FT node over a rowid iterator (see EXTRA_SET_ITERATOR)
const ExtDoc_t * m_pDoclist = nullptr;
const ExtHit_t * m_pHitlist = nullptr;
ExtDoc_t m_dMyDocs[MAX_BLOCK_DOCS]; ///< my local documents pool; for filtering
CSphMatch m_dMyMatches[MAX_BLOCK_DOCS]; ///< my local matches pool; for filtering
CSphMatch m_tTestMatch;
	const CSphIndex * m_pIndex = nullptr; ///< the index that will do my filtering!
CSphQueryContext * m_pCtx = nullptr;
int64_t * m_pNanoBudget = nullptr;
QcacheEntry_c * m_pQcacheEntry = nullptr; ///< data to cache if we decide that the current query is worth caching
StrVec_t m_dZones;
CSphVector<std::unique_ptr<ExtNode_i>> m_dZoneStartTerm;
CSphVector<std::unique_ptr<ExtNode_i>> m_dZoneEndTerm;
CSphVector<const ExtDoc_t*> m_dZoneStart;
CSphVector<const ExtDoc_t*> m_dZoneEnd;
CSphVector<RowID_t> m_dZoneMax; ///< last rowid we (tried) to cache
CSphVector<RowID_t> m_dZoneMin; ///< first rowid we (tried) to cache
ZoneVVector_t m_dZoneInfo {0};
bool m_bZSlist;
static constexpr float COST_SCALE = 1.0f/1000000.0f;
void CleanupZones ( RowID_t tMaxRowID );
void UpdateQcache ( int iMatches );
virtual float CalcRankCost ( int64_t iDocs ) const = 0;
bool ExtraDataImpl ( ExtraData_e eType, void ** ppResult ) override
{
if ( !m_pRoot )
return true;
switch ( eType )
{
case EXTRA_SET_ITERATOR:
assert ( !m_pOriginalRoot );
m_pOriginalRoot = m_pRoot.release();
m_pRoot = CreatePseudoFTNode ( m_pOriginalRoot, (RowidIterator_i*)*ppResult );
return true;
case EXTRA_SET_BOUNDARIES:
m_pRoot->SetRowidBoundaries ( *(const RowIdBoundaries_t*)ppResult );
return true;
default:
return false;
}
}
};
template <bool USE_BM25>
class ExtRanker_T : public ExtRanker_c
{
public:
ExtRanker_T ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings );
virtual const ExtDoc_t * GetFilteredDocs ();
};
STATIC_ASSERT ( ( 8*8*sizeof(DWORD) )>=SPH_MAX_FIELDS, PAYLOAD_MASK_OVERFLOW );
static const bool WITH_BM25 = true;
template < bool USE_BM25 = false >
class ExtRanker_WeightSum_c : public ExtRanker_T<USE_BM25>
{
protected:
int m_iWeights = 0;
const int * m_pWeights = nullptr;
public:
ExtRanker_WeightSum_c ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings )
: ExtRanker_T<USE_BM25> ( tXQ, tSetup, { tSettings.m_bRowidLimits, tSettings.m_bSkipQCache, false, tSettings.m_tBoundaries } )
{}
int GetMatches () override;
float CalcRankCost ( int64_t iDocs ) const override { return 0.0f; }
bool InitState ( const CSphQueryContext & tCtx, CSphString & ) override
{
m_iWeights = tCtx.m_iWeights;
m_pWeights = tCtx.m_dWeights;
return true;
}
};
class ExtRanker_None_c : public ExtRanker_T<false>
{
using BASE = ExtRanker_T<false>;
public:
ExtRanker_None_c ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings )
: ExtRanker_T<false> ( tXQ, tSetup, { tSettings.m_bRowidLimits, tSettings.m_bSkipQCache, false, tSettings.m_tBoundaries } )
{}
int GetMatches () override;
float CalcRankCost ( int64_t iDocs ) const override { return float(iDocs)*BASE::COST_SCALE*13.0f; }
};
template < typename STATE, bool USE_BM25 >
class ExtRanker_State_T : public ExtRanker_T<USE_BM25>
{
using BASE = ExtRanker_T<USE_BM25>;
public:
ExtRanker_State_T ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings );
int GetMatches () override;
bool InitState ( const CSphQueryContext & tCtx, CSphString & sError ) override
{
return m_tState.Init ( tCtx.m_iWeights, &tCtx.m_dWeights[0], this, sError, tCtx.m_uPackedFactorFlags );
}
// FIXME! add specific costs for different rankers
float CalcRankCost ( int64_t iDocs ) const override { return float(iDocs)*BASE::COST_SCALE*18.0f; }
protected:
STATE m_tState;
const ExtHit_t * m_pHitBase;
CSphVector<int> m_dZonespans; // zonespanlists for my matches
private:
bool ExtraDataImpl ( ExtraData_e eType, void ** ppResult ) override
{
if ( BASE::ExtraDataImpl ( eType, ppResult ) )
return true;
switch ( eType )
{
case EXTRA_GET_DATA_ZONESPANS:
assert ( ppResult );
*ppResult = &m_dZonespans;
return true;
default:
return m_tState.ExtraData ( eType, ppResult );
}
}
};
//////////////////////////////////////////////////////////////////////////
namespace { // static
// dump spec for keyword nodes
// fixme! consider making a specific implementation if dot/plain need to differ
void RenderAccessSpecs ( StringBuilder_c & tRes, const bson::Bson_c& tBson, bool bWithZones )
{
using namespace bson;
{
ScopedComma_c dFieldsComma ( tRes, ", ", "fields=(", ")" );
Bson_c ( tBson.ChildByName ( SZ_FIELDS ) ).ForEach ( [&tRes] ( const NodeHandle_t & tNode ) {
tRes << String ( tNode );
} );
}
int iPos = (int)Int ( tBson.ChildByName ( SZ_MAX_FIELD_POS ) );
if ( iPos )
tRes.Sprintf ( "max_field_pos=%d", iPos );
if ( !bWithZones )
return;
auto tZones = tBson.GetFirstOf ( { SZ_ZONES, SZ_ZONESPANS } );
ScopedComma_c dZoneDelim ( tRes, ", ", ( tZones.first==1 ) ? "zonespans=(" : "zones=(", ")" );
Bson_c ( tZones.second ).ForEach ( [&tRes] ( const NodeHandle_t & tNode ) {
tRes << String ( tNode );
} );
}
bool RenderKeywordNode ( StringBuilder_c & tRes, const bson::Bson_c& tBson )
{
using namespace bson;
auto tWord = tBson.ChildByName ( SZ_WORD );
if ( IsNullNode ( tWord ) )
return false;
ScopedComma_c ExplainComma ( tRes, ", ", "KEYWORD(", ")" );
tRes << String ( tWord );
tRes.Sprintf ( "querypos=%d", Int ( tBson.ChildByName ( SZ_QUERYPOS ) ) );
if ( Bool ( tBson.ChildByName ( SZ_EXCLUDED ) ) )
tRes += "excluded";
if ( Bool ( tBson.ChildByName ( SZ_EXPANDED ) ) )
tRes += "expanded";
if ( Bool ( tBson.ChildByName ( SZ_FIELD_START ) ) )
tRes += "field_start";
if ( Bool ( tBson.ChildByName ( SZ_FIELD_END ) ) )
tRes += "field_end";
if ( Bool ( tBson.ChildByName ( SZ_MORPHED ) ) )
tRes += "morphed";
if ( Bool ( tBson.ChildByName ( SZ_REGEX ) ) )
tRes += "regex";
auto tBoost = tBson.ChildByName ( SZ_BOOST );
if ( !IsNullNode ( tBoost ) )
{
auto fBoost = Double ( tBoost );
if ( fBoost!=1.0f ) // really comparing floats?
tRes.Appendf ( "boost=%f", fBoost );
}
return true;
}
void RenderPlainBsonPlan ( bson::NodeHandle_t dBson, StringBuilder_c & tRes, bool bWithZones,
int iIndent, const char * szIndent, const char * szLinebreak )
{
using namespace bson;
if ( dBson==nullnode )
return;
Bson_c tBson ( dBson );
if ( RenderKeywordNode ( tRes, tBson ) )
return;
ScopedComma_c sEmpty ( tRes, nullptr );
if ( iIndent )
tRes += szLinebreak;
for ( int i = 0; i<iIndent; ++i )
tRes += szIndent;
tRes << String ( tBson.ChildByName ( SZ_TYPE ) );
// enclose the rest in brackets, comma-separated
ScopedComma_c ExplainComma ( tRes, ", ", "(", ")" );
Bson_c ( tBson.ChildByName ( SZ_OPTIONS ) ).ForEach ( [&tRes] ( CSphString&& sName, const NodeHandle_t & tNode ) {
tRes.Sprintf ( "%s=%d", sName.cstr (), (int) Int ( tNode ) );
} );
if ( Bool ( tBson.ChildByName ( SZ_VIRTUALLY_PLAIN ) ) )
tRes += "virtually-plain";
// dump spec for keyword nodes
RenderAccessSpecs ( tRes, dBson, bWithZones );
Bson_c ( tBson.ChildByName ( SZ_CHILDREN ) ).ForEach ( [&] ( const NodeHandle_t & tNode ) {
RenderPlainBsonPlan ( tNode, tRes, bWithZones, iIndent+1, szIndent, szLinebreak );
} );
}
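// a minimal sketch of what RenderPlainBsonPlan emits for a simple two-keyword
// AND query (illustrative; the exact output depends on the BSON plan contents):
//
//   AND(KEYWORD(foo, querypos=1), KEYWORD(bar, querypos=2))
//
// non-keyword children get a linebreak plus one indent level per depth, while
// keyword nodes are rendered inline via RenderKeywordNode()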
bool RenderKeywordNodeDot ( StringBuilder_c & tRes, const bson::Bson_c& tBson )
{
using namespace bson;
auto tWord = tBson.ChildByName ( SZ_WORD );
if ( IsNullNode ( tWord ) )
return false;
//[shape=record label="wayyy | {expanded | pos=1}"]
ScopedComma_c ExplainComma ( tRes, " | ", "[shape=record label=\"", "\"]\n" );
tRes << String ( tWord );
ScopedComma_c ParamComma ( tRes, " | ", "{ ", " }" );
tRes.Sprintf ( "querypos=%d", Int ( tBson.ChildByName ( SZ_QUERYPOS ) ) );
if ( Bool ( tBson.ChildByName ( SZ_EXCLUDED ) ) )
tRes += "excluded";
if ( Bool ( tBson.ChildByName ( SZ_EXPANDED ) ) )
tRes += "expanded";
if ( Bool ( tBson.ChildByName ( SZ_FIELD_START ) ) )
tRes += "field_start";
if ( Bool ( tBson.ChildByName ( SZ_FIELD_END ) ) )
tRes += "field_end";
if ( Bool ( tBson.ChildByName ( SZ_MORPHED ) ) )
tRes += "morphed";
if ( Bool ( tBson.ChildByName ( SZ_REGEX ) ) )
tRes += "regex";
auto tBoost = tBson.ChildByName ( SZ_BOOST );
if ( !IsNullNode ( tBoost ) )
{
auto fBoost = Double ( tBoost );
if ( fBoost!=1.0f ) // really comparing floats?
tRes.Appendf ( "boost=%f", fBoost );
}
return true;
}
void RenderDotBsonNodePlan ( bson::NodeHandle_t dBson, StringBuilder_c & tRes, int& iId )
{
using namespace bson;
if ( dBson==nullnode )
return;
tRes << "\n" << iId << " "; // node num
Bson_c tBson ( dBson );
if ( RenderKeywordNodeDot ( tRes, tBson ) )
return;
{
ScopedComma_c ExplainComma ( tRes, " | ", R"([shape=record,style=filled,bgcolor="lightgrey" label=")", "\"]\n" );
tRes << String ( tBson.ChildByName ( SZ_TYPE ) );
ScopedComma_c ParamComma ( tRes, " \\n| ", "{ ", " }" );
// enclose the rest in brackets, comma-separated
Bson_c ( tBson.ChildByName ( SZ_OPTIONS ) ).ForEach (
[&tRes] ( CSphString && sName, const NodeHandle_t & tNode ) {
tRes.Sprintf ( "%s=%d", sName.cstr (), (int) Int ( tNode ) );
} );
if ( Bool ( tBson.ChildByName ( SZ_VIRTUALLY_PLAIN ) ) )
tRes << "virtually-plain";
// dump spec for keyword nodes
RenderAccessSpecs ( tRes, dBson, true );
}
int iRoot = iId;
Bson_c ( tBson.ChildByName ( SZ_CHILDREN ) ).ForEach ( [&] ( const NodeHandle_t & tNode ) {
++iId;
tRes << iRoot << " -> " << iId;
RenderDotBsonNodePlan ( tNode, tRes, iId );
} );
}
void RenderDotBsonPlan ( bson::NodeHandle_t dBson, StringBuilder_c & tRes )
{
int iId=0;
tRes << "digraph \"transformed_tree\"\n{\n";
RenderDotBsonNodePlan ( dBson, tRes, iId );
tRes << "}";
}
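// an illustrative sketch of the DOT output for the same two-keyword AND query
// (node ids and labels are assumptions; they depend on the actual plan):
//
//   digraph "transformed_tree"
//   {
//   0 [shape=record,style=filled,bgcolor="lightgrey" label="AND"]
//   0 -> 1
//   1 [shape=record label="foo | { querypos=1 }"]
//   0 -> 2
//   2 [shape=record label="bar | { querypos=2 }"]
//   }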
// parse node to bson
void XQNodeGetExtraBson ( bson::Assoc_c & tNode, const XQNode_t * pNode )
{
switch ( pNode->GetOp() )
{
case SPH_QUERY_PROXIMITY:
case SPH_QUERY_NEAR: bson::Obj_c ( tNode.StartObj (SZ_OPTIONS) ).AddInt ( "distance", pNode->m_iOpArg ); break;
case SPH_QUERY_QUORUM: bson::Obj_c ( tNode.StartObj (SZ_OPTIONS) ).AddInt ( "count", pNode->m_iOpArg ); break;
default: break;
}
}
void AddAccessSpecsBson ( bson::Assoc_c & tNode, const XQNode_t * pNode, const CSphSchema & tSchema, const StrVec_t & dZones )
{
assert ( pNode );
// dump spec for keyword nodes
// FIXME? double check that spec does *not* affect non-keyword nodes
if ( pNode->m_dSpec.IsEmpty () || pNode->m_dWords.IsEmpty () )
return;
const XQLimitSpec_t & s = pNode->m_dSpec;
if ( s.m_bFieldSpec && !s.m_dFieldMask.TestAll ( true ) )
{
StrVec_t dFields;
for ( int i = 0; i<tSchema.GetFieldsCount (); ++i )
if ( s.m_dFieldMask.Test ( i ) )
dFields.Add ( tSchema.GetFieldName ( i ) );
tNode.AddStringVec( SZ_FIELDS, dFields );
}
if ( s.m_iFieldMaxPos )
tNode.AddInt ( SZ_MAX_FIELD_POS, s.m_iFieldMaxPos );
if ( s.m_dZones.GetLength () )
tNode.AddStringVec ( s.m_bZoneSpan ? SZ_ZONESPANS : SZ_ZONES, dZones );
}
void CreateKeywordBson ( bson::Assoc_c& tWord, const XQKeyword_t & tKeyword )
{
tWord.AddString ( SZ_TYPE, "KEYWORD" );
tWord.AddString ( SZ_WORD, tKeyword.m_sWord.cstr () );
tWord.AddInt ( SZ_QUERYPOS, tKeyword.m_iAtomPos );
if ( tKeyword.m_bExcluded )
tWord.AddBool ( SZ_EXCLUDED, true );
if ( tKeyword.m_bExpanded )
tWord.AddBool ( SZ_EXPANDED, true );
if ( tKeyword.m_bFieldStart )
tWord.AddBool ( SZ_FIELD_START, true );
if ( tKeyword.m_bFieldEnd )
tWord.AddBool ( SZ_FIELD_END, true );
if ( tKeyword.m_bMorphed )
tWord.AddBool ( SZ_MORPHED, true );
if ( tKeyword.m_bRegex )
tWord.AddBool ( SZ_REGEX, true );
if ( tKeyword.m_fBoost!=1.0f )
tWord.AddDouble ( SZ_BOOST, tKeyword.m_fBoost );
}
void BuildPlanBson ( bson::Assoc_c& tPlan, const XQNode_t * pNode, const CSphSchema & tSchema, const StrVec_t & dZones )
{
using namespace bson;
tPlan.AddString ( SZ_TYPE, sphXQNodeToStr ( pNode ).cstr() );
XQNodeGetExtraBson ( tPlan, pNode );
AddAccessSpecsBson ( tPlan, pNode, tSchema, dZones );
if ( pNode->m_dChildren.GetLength () && pNode->m_dWords.GetLength () )
tPlan.AddBool ( SZ_VIRTUALLY_PLAIN, true );
if ( pNode->m_dChildren.IsEmpty () )
{
MixedVector_c dChildren ( tPlan.StartMixedVec( SZ_CHILDREN ), pNode->m_dWords.GetLength() );
for ( const auto & i : pNode->m_dWords )
{
Obj_c tWord ( dChildren.StartObj () );
CreateKeywordBson ( tWord, i );
}
} else
{
MixedVector_c dChildren ( tPlan.StartMixedVec ( SZ_CHILDREN ), pNode->m_dChildren.GetLength () );
for ( const auto & i : pNode->m_dChildren )
{
Obj_c tChild ( dChildren.StartObj () );
BuildPlanBson ( tChild, i, tSchema, dZones );
}
}
}
} // namespace static
void sph::RenderBsonPlan ( StringBuilder_c& tRes, const bson::NodeHandle_t & dBson, bool bDot )
{
if ( bDot )
RenderDotBsonPlan ( dBson, tRes );
else
RenderPlainBsonPlan ( dBson, tRes, true, 0, " ", "\n" );
#if 0
CSphString sResult1;
bson::Bson_c ( dBson ).BsonToJson ( sResult1 );
tRes << "raw: " << sResult1;
#endif
}
CSphString sph::RenderBsonPlanBrief ( const bson::NodeHandle_t& dBson )
{
StringBuilder_c tRes;
RenderPlainBsonPlan ( dBson, tRes, false, 0, "", " " );
CSphString sResult;
tRes.MoveTo ( sResult );
return sResult;
}
Bson_t sphExplainQuery ( const XQNode_t * pNode, const CSphSchema & tSchema, const StrVec_t & dZones )
{
CSphVector<BYTE> dPlan;
{
bson::Root_c tPlan ( dPlan );
::BuildPlanBson ( tPlan, pNode, tSchema, dZones );
}
return dPlan;
}
void QueryProfile_c::BuildResult ( XQNode_t * pRoot, const CSphSchema & tSchema, const StrVec_t & dZones )
{
if ( m_eNeedPlan == PLAN_FLAVOUR::ENONE )
return;
m_dPlan.Reset();
bson::Root_c tPlan ( m_dPlan );
::BuildPlanBson ( tPlan, pRoot, tSchema, dZones );
}
ExtRanker_c::ExtRanker_c ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings, bool bUseBM25 )
{
assert ( tSetup.m_pCtx );
for ( int i=0; i<MAX_BLOCK_DOCS; i++ )
{
m_dMatches[i].Reset ( tSetup.m_iDynamicRowitems );
m_dMyMatches[i].Reset ( tSetup.m_iDynamicRowitems );
}
m_tTestMatch.Reset ( tSetup.m_iDynamicRowitems );
assert ( tXQ.m_pRoot );
tSetup.m_pZoneChecker = this;
assert ( !m_pRoot );
m_pRoot.reset ( ExtNode_i::Create ( tXQ.m_pRoot, tSetup, bUseBM25, tSettings.m_bRowidLimits ? &tSettings.m_tBoundaries : nullptr ) );
if ( m_pRoot && tSettings.m_bCollectHits )
m_pRoot->SetCollectHits();
#if SPH_TREE_DUMP
if ( m_pRoot )
m_pRoot->DebugDump(0);
#endif
// we generally have three (!) trees for each query
// 1) parsed tree, a raw result of query parsing
// 2) transformed tree, with star expansions, morphology, and other transformations
// 3) evaluation tree, with tiny keywords cache, and other optimizations
// tXQ.m_pRoot, passed to ranker from the index, is the transformed tree
// m_pRoot, internal to ranker, is the evaluation tree
if ( tSetup.m_pCtx->m_pProfile )
tSetup.m_pCtx->m_pProfile->BuildResult ( tXQ.m_pRoot, tSetup.m_pIndex->GetMatchSchema(), tXQ.m_dZones );
m_pIndex = tSetup.m_pIndex;
m_pCtx = tSetup.m_pCtx;
m_pNanoBudget = tSetup.m_pStats ? tSetup.m_pStats->m_pNanoBudget : nullptr;
m_dZones = tXQ.m_dZones;
m_dZoneStart.Resize ( m_dZones.GetLength() );
m_dZoneEnd.Resize ( m_dZones.GetLength() );
m_dZoneMax.Resize ( m_dZones.GetLength() );
m_dZoneMin.Resize ( m_dZones.GetLength() );
m_dZoneMax.Fill ( 0 );
m_dZoneMin.Fill ( INVALID_ROWID );
m_bZSlist = tXQ.m_bNeedSZlist;
m_dZoneInfo.Reset ( m_dZones.GetLength() );
DictRefPtr_c pZonesDict;
// workaround for a particular case: a CRC (non-keywords) dictionary combined with zones needs a cloned dictionary for the zone terms
if ( !m_pIndex->GetDictionary()->GetSettings().m_bWordDict && m_dZones.GetLength() )
pZonesDict = m_pIndex->GetDictionary()->Clone();
ARRAY_FOREACH ( i, m_dZones )
{
XQKeyword_t tDot;
tDot.m_sWord.SetSprintf ( "%c%s", MAGIC_CODE_ZONE, m_dZones[i].cstr() );
auto& pStartTerm = m_dZoneStartTerm.Add();
pStartTerm.reset ( ExtNode_i::Create ( tDot, tSetup, pZonesDict, false, tSettings.m_bRowidLimits ) );
assert ( pStartTerm );
pStartTerm->SetCollectHits();
m_dZoneStart[i] = nullptr;
tDot.m_sWord.SetSprintf ( "%c/%s", MAGIC_CODE_ZONE, m_dZones[i].cstr() );
auto& pEndTerm = m_dZoneEndTerm.Add();
pEndTerm.reset ( ExtNode_i::Create ( tDot, tSetup, pZonesDict, false, tSettings.m_bRowidLimits ) );
assert ( pEndTerm );
pEndTerm->SetCollectHits();
m_dZoneEnd[i] = nullptr;
}
if ( QcacheGetStatus().m_iMaxBytes>0 && !tSettings.m_bSkipQCache )
{
m_pQcacheEntry = new QcacheEntry_c();
m_pQcacheEntry->m_iIndexId = m_pIndex->GetIndexId();
}
memset ( m_dMyDocs, 0, sizeof ( m_dMyDocs ) );
}
ExtRanker_c::~ExtRanker_c ()
{
SafeRelease ( m_pQcacheEntry );
for ( auto& tInfo : m_dZoneInfo )
{
for ( auto& tDoc : tInfo )
SafeDelete ( tDoc.m_pHits );
tInfo.Reset();
}
}
void ExtRanker_c::Reset ( const ISphQwordSetup & tSetup )
{
if ( m_pRoot )
m_pRoot->Reset(tSetup);
// restore the tree to its original state before switching to the next chunk
if ( m_pOriginalRoot )
{
// pseudo-fulltext nodes forget about their underlying nodes after Reset()
// so deleting m_pRoot deletes only pseudo-FT nodes and keeps the original tree
m_pRoot.reset();
m_pRoot = std::unique_ptr<ExtNode_i>(m_pOriginalRoot);
m_pOriginalRoot = nullptr;
}
ARRAY_FOREACH ( i, m_dZones )
{
m_dZoneStartTerm[i]->Reset ( tSetup );
m_dZoneEndTerm[i]->Reset ( tSetup );
m_dZoneStart[i] = nullptr;
m_dZoneEnd[i] = nullptr;
}
m_dZoneMax.Fill ( 0 );
m_dZoneMin.Fill ( INVALID_ROWID );
for ( auto& tInfo : m_dZoneInfo )
{
for ( auto& tDoc: tInfo )
SafeDelete ( tDoc.m_pHits );
tInfo.Reset();
}
// Ranker::Reset() happens on a switch to next RT segment
// next segment => new and shiny docids => gotta restart encoding
if ( m_pQcacheEntry )
m_pQcacheEntry->RankerReset();
}
void ExtRanker_c::UpdateQcache ( int iMatches )
{
if ( m_pQcacheEntry && iMatches )
{
CSphScopedProfile tProf ( m_pCtx->m_pProfile, SPH_QSTATE_QCACHE_UP );
for ( int i=0; i<iMatches; i++ )
m_pQcacheEntry->Append ( m_dMatches[i].m_tRowID, m_dMatches[i].m_iWeight );
}
}
void ExtRanker_c::FinalizeCache ( const ISphSchema & tSorterSchema )
{
if ( m_pQcacheEntry )
{
CSphScopedProfile tProf ( m_pCtx->m_pProfile, SPH_QSTATE_QCACHE_FINAL );
QcacheAdd ( m_pCtx->m_tQuery, m_pQcacheEntry, tSorterSchema );
}
SafeReleaseAndZero ( m_pQcacheEntry );
}
NodeEstimate_t ExtRanker_c::Estimate ( int64_t iTotalDocs ) const
{
if ( !m_pRoot )
return { 0.0f, 0 };
auto tRes = m_pRoot->Estimate(iTotalDocs);
tRes.m_fCost += CalcRankCost ( tRes.m_iDocs );
return tRes;
}
void ExtRanker_c::CleanupZones ( RowID_t tMaxRowID )
{
if ( tMaxRowID==INVALID_ROWID )
return;
ARRAY_FOREACH ( i, m_dZoneMin )
{
RowID_t tMinRowID = m_dZoneMin[i];
if ( tMinRowID==INVALID_ROWID )
continue;
auto& dZone = m_dZoneInfo[i];
int iSpan = FindSpan ( dZone, tMaxRowID );
if ( iSpan==-1 )
continue;
if ( iSpan==dZone.GetLength()-1 )
{
for ( auto& tZone : dZone )
SafeDelete ( tZone.m_pHits );
dZone.Resize ( 0 );
m_dZoneMin[i] = tMaxRowID;
continue;
}
for ( int iDoc=0; iDoc<=iSpan; ++iDoc )
SafeDelete ( dZone[iDoc].m_pHits );
int iLen = dZone.GetLength() - iSpan - 1;
memmove ( dZone.Begin(), dZone.Begin()+iSpan+1, sizeof(dZone[0]) * iLen );
dZone.Resize ( iLen );
m_dZoneMin[i] = dZone.Begin()->m_tRowID;
}
}
void ExtRanker_c::SetQwordsIDF ( const ExtQwordsHash_t & hQwords )
{
m_iQwords = hQwords.GetLength ();
if ( m_pRoot )
m_pRoot->SetQwordsIDF ( hQwords );
}
static SphZoneHit_e ZoneCacheFind ( const ZoneVVector_t & dZones, int iZone, const ExtHit_t * pHit, int * pLastSpan )
{
if ( !dZones[iZone].GetLength() )
return SPH_ZONE_NO_DOCUMENT;
ZoneInfo_t * pZone = dZones[iZone].BinarySearch ( bind ( &ZoneInfo_t::m_tRowID ), pHit->m_tRowID );
if ( pZone )
{
// remove end markers that might mess up ordering
Hitpos_t uPosWithField = HITMAN::GetPosWithField ( pHit->m_uHitpos );
int iSpan = FindSpan ( pZone->m_pHits->m_dStarts, uPosWithField );
if ( iSpan<0 || uPosWithField>pZone->m_pHits->m_dEnds[iSpan] )
return SPH_ZONE_NO_SPAN;
if ( pLastSpan )
*pLastSpan = iSpan;
return SPH_ZONE_FOUND;
}
return SPH_ZONE_NO_DOCUMENT;
}
SphZoneHit_e ExtRanker_c::IsInZone ( int iZone, const ExtHit_t * pHit, int * pLastSpan )
{
// quick route, we have current docid cached
SphZoneHit_e eRes = ZoneCacheFind ( m_dZoneInfo, iZone, pHit, pLastSpan );
if ( eRes!=SPH_ZONE_NO_DOCUMENT )
return eRes;
// is there any zone info for this document at all?
if ( pHit->m_tRowID<m_dZoneMax[iZone] )
return SPH_ZONE_NO_DOCUMENT;
// long route, read in zone info for all (!) the documents until next requested
// that's because we might be queried out of order
// current chunk
const ExtDoc_t * pStart = m_dZoneStart[iZone];
const ExtDoc_t * pEnd = m_dZoneEnd[iZone];
// now keep caching spans until we see current id
while ( pHit->m_tRowID >= m_dZoneMax[iZone] )
{
// get more docs if needed
if ( ( !pStart && m_dZoneMax[iZone]!=INVALID_ROWID ) || pStart->m_tRowID==INVALID_ROWID )
{
pStart = m_dZoneStartTerm[iZone]->GetDocsChunk();
if ( !pStart )
{
m_dZoneMax[iZone] = INVALID_ROWID;
return SPH_ZONE_NO_DOCUMENT;
}
}
if ( ( !pEnd && m_dZoneMax[iZone]!=INVALID_ROWID ) || pEnd->m_tRowID==INVALID_ROWID )
{
pEnd = m_dZoneEndTerm[iZone]->GetDocsChunk();
if ( !pEnd )
{
m_dZoneMax[iZone] = INVALID_ROWID;
return SPH_ZONE_NO_DOCUMENT;
}
}
assert ( pStart && pEnd );
// skip zone starts past already cached stuff
while ( pStart->m_tRowID<m_dZoneMax[iZone] )
pStart++;
if ( pStart->m_tRowID==INVALID_ROWID )
continue;
// skip zone ends until a match with start
while ( pEnd->m_tRowID<pStart->m_tRowID )
pEnd++;
if ( pEnd->m_tRowID==INVALID_ROWID )
continue;
// handle mismatching start/end ids
// (this must never happen normally, but who knows what data we're fed)
assert ( pStart->m_tRowID!=INVALID_ROWID );
assert ( pEnd->m_tRowID!=INVALID_ROWID );
assert ( pStart->m_tRowID<=pEnd->m_tRowID );
if ( pStart->m_tRowID!=pEnd->m_tRowID )
{
while ( pStart->m_tRowID < pEnd->m_tRowID )
pStart++;
if ( pStart->m_tRowID==INVALID_ROWID )
continue;
}
// first matching uncached docid found!
assert ( pStart->m_tRowID==pEnd->m_tRowID );
assert ( pStart->m_tRowID >= m_dZoneMax[iZone] );
// but maybe we don't need docid this big just yet?
if ( pStart->m_tRowID > pHit->m_tRowID )
{
// store current in-chunk positions
m_dZoneStart[iZone] = pStart;
m_dZoneEnd[iZone] = pEnd;
// no zone info for all those preceding documents (including the requested one)
m_dZoneMax[iZone] = pStart->m_tRowID;
return SPH_ZONE_NO_DOCUMENT;
}
// cache all matching docs from current chunks below requested docid
// (there might be more matching docs, but we are lazy and won't cache them upfront)
ExtDoc_t dCache [ MAX_BLOCK_DOCS ];
int iCache = 0;
while ( pStart->m_tRowID<=pHit->m_tRowID )
{
// match
if ( pStart->m_tRowID==pEnd->m_tRowID )
{
dCache[iCache++] = *pStart;
pStart++;
pEnd++;
continue;
}
// mismatch!
// this must not really happen, starts/ends must be in sync
// but let's be graceful anyway, and just skip to next match
if ( pStart->m_tRowID==INVALID_ROWID || pEnd->m_tRowID==INVALID_ROWID )
break;
while ( pStart->m_tRowID < pEnd->m_tRowID )
pStart++;
if ( pStart->m_tRowID==INVALID_ROWID )
break;
while ( pEnd->m_tRowID < pStart->m_tRowID )
pEnd++;
if ( pEnd->m_tRowID==INVALID_ROWID )
break;
}
// should have found at least one id to cache
assert ( iCache );
assert ( iCache < MAX_BLOCK_DOCS );
dCache[iCache].m_tRowID = INVALID_ROWID;
// do caching
const ExtHit_t * pStartHits = m_dZoneStartTerm[iZone]->GetHits ( dCache );
const ExtHit_t * pEndHits = m_dZoneEndTerm[iZone]->GetHits ( dCache );
int iReserveStart = m_dZoneStartTerm[iZone]->GetHitsCount() / Max ( m_dZoneStartTerm[iZone]->GetDocsCount(), 1 );
int iReserveEnd = m_dZoneEndTerm[iZone]->GetHitsCount() / Max ( m_dZoneEndTerm[iZone]->GetDocsCount(), 1 );
int iReserve = Max ( iReserveStart, iReserveEnd );
// loop documents one by one
while ( pStartHits->m_tRowID!=INVALID_ROWID && pEndHits->m_tRowID!=INVALID_ROWID )
{
// load all hits for current document
RowID_t tCurRowID = pStartHits->m_tRowID;
// FIXME!!! replace with iterating and appending to the vector instead of searching each time
ZoneHits_t * pZone = nullptr;
CSphVector<ZoneInfo_t> & dZones = m_dZoneInfo[iZone];
if ( !dZones.IsEmpty() )
{
ZoneInfo_t * pInfo = dZones.BinarySearch ( bind ( &ZoneInfo_t::m_tRowID ), tCurRowID );
if ( pInfo )
pZone = pInfo->m_pHits;
}
if ( !pZone )
{
if ( !dZones.IsEmpty() && dZones.Last().m_tRowID>tCurRowID )
{
int iInsertPos = FindSpan ( dZones, tCurRowID );
if ( iInsertPos>=0 )
{
dZones.Insert ( iInsertPos, ZoneInfo_t{} );
auto& tInsertedZone = dZones[iInsertPos];
tInsertedZone.m_tRowID = tCurRowID;
tInsertedZone.m_pHits = new ZoneHits_t();
pZone = tInsertedZone.m_pHits;
}
} else
{
ZoneInfo_t & tElem = dZones.Add ();
tElem.m_tRowID = tCurRowID;
tElem.m_pHits = new ZoneHits_t();
pZone = tElem.m_pHits;
}
if ( pZone )
{
pZone->m_dStarts.Reserve ( iReserve );
pZone->m_dEnds.Reserve ( iReserve );
}
}
assert ( pEndHits->m_tRowID==tCurRowID );
// load all the pairs of start and end hits for it
// do it with the FSM:
//
// state 'begin':
// - start marker -> set state 'inspan', startspan=pos++
// - end marker -> pos++
// - end of doc -> set state 'finish'
//
// state 'inspan':
// - start marker -> startspan = pos++
// - end marker -> set state 'outspan', endspan=pos++
// - end of doc -> set state 'finish'
//
// state 'outspan':
// - start marker -> set state 'inspan', commit span, startspan=pos++
// - end marker -> endspan = pos++
// - end of doc -> set state 'finish', commit span
//
// state 'finish':
// - we are done.
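//
// an illustrative trace (positions are made up): starts={1,3}, ends={5,7},
// i.e. <b> a <b> b </b> c </b>
// - begin: bInSpan=true, startspan=1, endspan=5
// - start marker at 3 while inspan: swallowed, the span start stays 1
// - starts exhausted; end markers at 5 then 7 advance endspan to 7
// - end of doc: commit a single span (1,7)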
int bEofDoc = 0;
// state 'begin' is here.
while ( !bEofDoc && pEndHits->m_uHitpos < pStartHits->m_uHitpos )
{
++pEndHits;
bEofDoc |= (pEndHits->m_tRowID!=tCurRowID)?2:0;
}
if ( !bEofDoc )
{
// state 'inspan' (true) or 'outspan' (false)
bool bInSpan = true;
Hitpos_t iSpanBegin = pStartHits->m_uHitpos;
Hitpos_t iSpanEnd = pEndHits->m_uHitpos;
while ( bEofDoc!=3 ) /// action end-of-doc
{
// action inspan/start-marker
if ( bInSpan || ( bEofDoc & 2 ) )
{
++pStartHits;
bEofDoc |= (pStartHits->m_tRowID!=tCurRowID)?1:0;
} else
{
// action outspan/end-marker
++pEndHits;
bEofDoc |= (pEndHits->m_tRowID!=tCurRowID)?2:0;
}
if ( !( bEofDoc & 1 ) && pStartHits->m_uHitpos<pEndHits->m_uHitpos )
{
// actions for outspan/start-marker state
// <b>...<b>..<b>..</b> will ignore all the <b> inside.
if ( !bInSpan )
{
bInSpan = true;
pZone->m_dStarts.Add ( iSpanBegin );
pZone->m_dEnds.Add ( iSpanEnd );
iSpanBegin = pStartHits->m_uHitpos;
}
} else if ( !( bEofDoc & 2 ) )
{
// actions for inspan/end-marker state
// so, <b>...</b>..</b>..</b> will ignore all the </b> inside.
bInSpan = false;
iSpanEnd = pEndHits->m_uHitpos;
}
}
// action 'commit' for outspan/end-of-doc
if ( iSpanBegin < iSpanEnd )
{
pZone->m_dStarts.Add ( iSpanBegin );
pZone->m_dEnds.Add ( iSpanEnd );
}
}
// advance both hitlists to the same doc
while ( pStartHits->m_tRowID!=INVALID_ROWID && pEndHits->m_tRowID!=INVALID_ROWID && pStartHits->m_tRowID!=pEndHits->m_tRowID )
{
while ( pStartHits->m_tRowID!=INVALID_ROWID && pEndHits->m_tRowID!=INVALID_ROWID && pStartHits->m_tRowID<pEndHits->m_tRowID )
pStartHits++;
while ( pStartHits->m_tRowID!=INVALID_ROWID && pEndHits->m_tRowID!=INVALID_ROWID && pEndHits->m_tRowID<pStartHits->m_tRowID )
pEndHits++;
}
// data sanity checks
assert ( pZone->m_dStarts.GetLength()==pZone->m_dEnds.GetLength() );
// update cache status
m_dZoneMax[iZone] = tCurRowID+1;
m_dZoneMin[iZone] = Min ( m_dZoneMin[iZone], tCurRowID );
}
}
// store current in-chunk positions
m_dZoneStart[iZone] = pStart;
m_dZoneEnd[iZone] = pEnd;
// cached a bunch of spans, try our check again
return ZoneCacheFind ( m_dZoneInfo, iZone, pHit, pLastSpan );
}
//////////////////////////////////////////////////////////////////////////
template<bool USE_BM25>
ExtRanker_T<USE_BM25>::ExtRanker_T ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings )
: ExtRanker_c ( tXQ, tSetup, tSettings, USE_BM25 )
{}
template<bool USE_BM25>
const ExtDoc_t * ExtRanker_T<USE_BM25>::GetFilteredDocs ()
{
#if QDEBUG
printf ( "ranker getfiltereddocs %p\n", this );
#endif
while (true)
{
// get another chunk
SwitchProfile ( m_pCtx->m_pProfile, SPH_QSTATE_GET_DOCS );
const ExtDoc_t * pCand = m_pRoot->GetDocsChunk();
if ( !pCand )
{
SwitchProfile ( m_pCtx->m_pProfile, SPH_QSTATE_RANK );
return nullptr;
}
// create matches, and filter them
SwitchProfile ( m_pCtx->m_pProfile, SPH_QSTATE_FILTER );
int iDocs = 0;
RowID_t tMaxRowID = 0;
while ( pCand->m_tRowID!=INVALID_ROWID )
{
m_tTestMatch.m_tRowID = pCand->m_tRowID;
m_tTestMatch.m_pStatic = nullptr;
if ( m_pIndex->EarlyReject ( m_pCtx, m_tTestMatch ) )
{
pCand++;
continue;
}
tMaxRowID = pCand->m_tRowID;
m_dMyDocs[iDocs] = *pCand;
if_const ( USE_BM25 )
m_tTestMatch.m_iWeight = (int)( (pCand->m_fTFIDF+0.5f)*SPH_BM25_SCALE );
Swap ( m_tTestMatch, m_dMyMatches[iDocs] );
iDocs++;
pCand++;
}
SwitchProfile ( m_pCtx->m_pProfile, SPH_QSTATE_RANK );
// clean up zone hash
if ( !m_bZSlist )
CleanupZones ( tMaxRowID );
if ( iDocs )
{
if ( m_pNanoBudget )
*m_pNanoBudget -= g_iPredictorCostMatch*iDocs;
m_dMyDocs[iDocs].m_tRowID = INVALID_ROWID;
return m_dMyDocs;
}
}
}
//////////////////////////////////////////////////////////////////////////
template < bool USE_BM25 >
int ExtRanker_WeightSum_c<USE_BM25>::GetMatches ()
{
if ( !this->m_pRoot )
return 0;
SwitchProfile ( this->m_pCtx->m_pProfile, SPH_QSTATE_RANK );
const ExtDoc_t * pDoc = this->m_pDoclist;
int iMatches = 0;
const int iWeights = Min ( m_iWeights, 32 );
while ( iMatches<MAX_BLOCK_DOCS )
{
if ( !pDoc || pDoc->m_tRowID==INVALID_ROWID ) pDoc = this->GetFilteredDocs ();
if ( !pDoc ) break;
DWORD uRank = 0;
DWORD uMask = pDoc->m_uDocFields;
if ( !uMask )
{
// possible if we have more than 32 fields
// honestly loading all hits etc is cumbersome, so let's just fake it
uRank = 1;
} else
{
// just sum weights over the lowest 32 fields
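// e.g. (weights are illustrative) uMask=0b101 with per-field weights
// {10,20,30} yields uRank = 10+30 = 40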
for ( int i=0; i<iWeights; i++ )
if ( uMask & (1<<i) )
uRank += m_pWeights[i];
}
Swap ( this->m_dMatches[iMatches], this->m_dMyMatches[pDoc-this->m_dMyDocs] ); // OPTIMIZE? can avoid this swap and simply return m_dMyMatches (though in smaller chunks)
if_const ( USE_BM25 )
this->m_dMatches[iMatches].m_iWeight = this->m_dMatches[iMatches].m_iWeight + uRank*SPH_BM25_SCALE;
else
this->m_dMatches[iMatches].m_iWeight = uRank;
iMatches++;
pDoc++;
}
this->UpdateQcache ( iMatches );
this->m_pDoclist = pDoc;
return iMatches;
}
//////////////////////////////////////////////////////////////////////////
int ExtRanker_None_c::GetMatches ()
{
if ( !m_pRoot )
return 0;
SwitchProfile ( m_pCtx->m_pProfile, SPH_QSTATE_RANK );
const ExtDoc_t * pDoc = m_pDoclist;
int iMatches = 0;
while ( iMatches<MAX_BLOCK_DOCS )
{
if ( !pDoc || pDoc->m_tRowID==INVALID_ROWID ) pDoc = GetFilteredDocs ();
if ( !pDoc ) break;
Swap ( m_dMatches[iMatches], m_dMyMatches[pDoc-m_dMyDocs] ); // OPTIMIZE? can avoid this swap and simply return m_dMyMatches (though in smaller chunks)
m_dMatches[iMatches].m_iWeight = 1;
iMatches++;
pDoc++;
}
UpdateQcache ( iMatches );
m_pDoclist = pDoc;
return iMatches;
}
//////////////////////////////////////////////////////////////////////////
template < typename STATE, bool USE_BM25 >
ExtRanker_State_T<STATE,USE_BM25>::ExtRanker_State_T ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const RankerSettings_t & tSettings )
: ExtRanker_T<USE_BM25> ( tXQ, tSetup, tSettings )
{
// FIXME!!! move out the disabling of m_bZSlist when there are no zonespan nodes
if ( this->m_bZSlist )
m_dZonespans.Reserve ( MAX_BLOCK_DOCS * this->m_dZones.GetLength() );
m_pHitBase = nullptr;
}
static inline const ExtHit_t * RankerGetHits ( QueryProfile_c * pProfile, ExtNode_i * pRoot, const ExtDoc_t * pDocs )
{
if ( !pProfile )
return pRoot->GetHits ( pDocs );
pProfile->Switch ( SPH_QSTATE_GET_HITS );
const ExtHit_t * pHlist = pRoot->GetHits ( pDocs );
pProfile->Switch ( SPH_QSTATE_RANK );
return pHlist;
}
template < typename STATE, bool USE_BM25 >
int ExtRanker_State_T<STATE,USE_BM25>::GetMatches ()
{
if ( !this->m_pRoot )
return 0;
SwitchProfile ( this->m_pCtx->m_pProfile, SPH_QSTATE_RANK );
QueryProfile_c * pProfile = this->m_pCtx->m_pProfile;
int iMatches = 0;
const ExtHit_t * pHlist = this->m_pHitlist;
const ExtHit_t * pHitBase = m_pHitBase;
const ExtDoc_t * pDocs = this->m_pDoclist;
m_dZonespans.Resize(1);
int iLastZoneData = 0;
CSphVector<int> dSpans;
if ( this->m_bZSlist )
{
dSpans.Resize ( this->m_dZones.GetLength() );
dSpans.Fill ( -1 );
}
// warmup if necessary
if ( !pDocs )
{
pDocs = this->GetFilteredDocs ();
if ( !pDocs )
return 0;
pHlist = RankerGetHits ( pProfile, this->m_pRoot.get(), pDocs );
}
if ( !pHitBase )
pHitBase = pHlist;
// main matching loop
const ExtDoc_t * pDoc = pDocs;
for ( RowID_t tCurRowID=INVALID_ROWID; iMatches < MAX_BLOCK_DOCS; )
{
// keep ranking
while ( pHlist->m_tRowID==tCurRowID )
{
m_tState.Update ( pHlist );
if ( this->m_bZSlist )
{
ARRAY_FOREACH ( i, this->m_dZones )
{
int iSpan;
if ( this->IsInZone ( i, pHlist, &iSpan )!=SPH_ZONE_FOUND )
continue;
if ( iSpan!=dSpans[i] )
{
m_dZonespans.Add ( i );
m_dZonespans.Add ( iSpan );
dSpans[i] = iSpan;
}
}
}
++pHlist;
}
// flush current doc
if ( tCurRowID!=INVALID_ROWID )
{
assert ( tCurRowID==pDoc->m_tRowID );
Swap ( this->m_dMatches[iMatches], this->m_dMyMatches[pDoc-this->m_dMyDocs] );
this->m_dMatches[iMatches].m_iWeight = m_tState.Finalize ( this->m_dMatches[iMatches] );
if ( this->m_bZSlist )
{
m_dZonespans[iLastZoneData] = m_dZonespans.GetLength() - iLastZoneData - 1;
this->m_dMatches[iMatches].m_iTag = iLastZoneData;
iLastZoneData = m_dZonespans.GetLength();
m_dZonespans.Add(0);
dSpans.Fill ( -1 );
}
iMatches++;
}
// boundary checks
if ( pHlist->m_tRowID==INVALID_ROWID )
{
if ( this->m_bZSlist && tCurRowID!=INVALID_ROWID )
this->CleanupZones ( tCurRowID );
// there are no more hits for current docs block; do we have a next one?
assert ( pDocs );
pDoc = pDocs = this->GetFilteredDocs ();
// we don't, so bail out
if ( !pDocs )
break;
// we do, get some hits with proper profile
pHlist = RankerGetHits ( pProfile, this->m_pRoot.get(), pDocs );
}
// skip until next good doc/hit pair
assert ( pDoc->m_tRowID<=pHlist->m_tRowID );
while ( pDoc->m_tRowID<pHlist->m_tRowID ) pDoc++;
assert ( pDoc->m_tRowID==pHlist->m_tRowID );
tCurRowID = pHlist->m_tRowID;
}
this->m_pDoclist = pDocs;
this->m_pHitlist = pHlist;
if ( !m_pHitBase )
m_pHitBase = pHitBase;
this->UpdateQcache ( iMatches );
return iMatches;
}
//////////////////////////////////////////////////////////////////////////
template < bool USE_BM25, bool HANDLE_DUPES >
struct RankerState_Proximity_fn : public ISphExtra
{
BYTE m_uLCS[SPH_MAX_FIELDS];
BYTE m_uCurLCS;
int m_iExpDelta;
int m_iLastHitPosWithField;
int m_iFields;
const int * m_pWeights;
DWORD m_uLcsTailPos;
DWORD m_uLcsTailQposMask;
DWORD m_uCurQposMask;
DWORD m_uCurPos;
bool Init ( int iFields, const int * pWeights, ExtRanker_c *, CSphString &, DWORD )
{
memset ( m_uLCS, 0, sizeof(m_uLCS) );
m_uCurLCS = 0;
m_iExpDelta = -INT_MAX;
m_iLastHitPosWithField = -INT_MAX;
m_iFields = iFields;
m_pWeights = pWeights;
m_uLcsTailPos = 0;
m_uLcsTailQposMask = 0;
m_uCurQposMask = 0;
m_uCurPos = 0;
return true;
}
void Update ( const ExtHit_t * pHlist )
{
if_const ( !HANDLE_DUPES )
{
// all query keywords are unique
// simpler path (just do the delta)
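// e.g. for query [one two three] (qpos 1,2,3) hitting in-field positions
// 5,6,7, every hit shares delta=4, so the LCS accumulates to 3
// (positions are illustrative, unit hit weights assumed)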
const int iPosWithField = HITMAN::GetPosWithField ( pHlist->m_uHitpos );
int iDelta = iPosWithField - pHlist->m_uQuerypos;
if ( iPosWithField>m_iLastHitPosWithField )
m_uCurLCS = ( ( iDelta==m_iExpDelta ) ? m_uCurLCS : 0 ) + BYTE(pHlist->m_uWeight);
DWORD uField = HITMAN::GetField ( pHlist->m_uHitpos );
if ( m_uCurLCS>m_uLCS[uField] )
m_uLCS[uField] = m_uCurLCS;
m_iLastHitPosWithField = iPosWithField;
m_iExpDelta = iDelta + pHlist->m_uSpanlen - 1; // !COMMIT why spanlen??
} else
{
// keywords are duplicated in the query
// so there might be multiple qpos entries sharing the same hitpos
DWORD uPos = HITMAN::GetPosWithField ( pHlist->m_uHitpos );
DWORD uField = HITMAN::GetField ( pHlist->m_uHitpos );
// reset accumulated data from previous field
if ( (DWORD)HITMAN::GetField ( m_uCurPos )!=uField )
m_uCurQposMask = 0;
if ( uPos!=m_uCurPos )
{
// next new and shiny hitpos in line
// FIXME!? what do we do with longer spans? keep looking? reset?
if ( m_uCurLCS<2 )
{
m_uLcsTailPos = m_uCurPos;
m_uLcsTailQposMask = m_uCurQposMask;
m_uCurLCS = 1; // FIXME!? can this ever be different? ("a b" c) maybe..
}
m_uCurQposMask = 0;
m_uCurPos = uPos;
if ( m_uLCS[uField] < pHlist->m_uWeight )
m_uLCS[uField] = BYTE(pHlist->m_uWeight);
}
// add that qpos to current qpos mask (for the current hitpos)
m_uCurQposMask |= ( 1UL << pHlist->m_uQuerypos );
// and check if that results in a better lcs match now
int iDelta = m_uCurPos - m_uLcsTailPos;
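// illustrative numbers: the tail ends at pos 10 with qpos 2 (tail mask bit 2);
// a hit at pos 11 with qpos 3 sets bit 3 in the current mask; iDelta=1, and
// (bit 3 >> 1) & bit 2 is nonzero, so the span continues and the LCS grows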
if ( iDelta && iDelta<32 && ( m_uCurQposMask >> iDelta ) & m_uLcsTailQposMask )
{
// cool, it matched!
m_uLcsTailQposMask = ( 1UL << pHlist->m_uQuerypos ); // our lcs span now ends with a specific qpos
m_uLcsTailPos = m_uCurPos; // and in a specific position
m_uCurLCS = BYTE ( m_uCurLCS + pHlist->m_uWeight ); // and it's longer
m_uCurQposMask = 0; // and we should avoid matching subsequent hits on the same hitpos
// update per-field vector
if ( m_uCurLCS>m_uLCS[uField] )
m_uLCS[uField] = m_uCurLCS;
}
}
}
int Finalize ( const CSphMatch & tMatch )
{
m_uCurLCS = 0;
m_iExpDelta = -1;
m_iLastHitPosWithField = -1;
if_const ( HANDLE_DUPES )
{
m_uLcsTailPos = 0;
m_uLcsTailQposMask = 0;
m_uCurQposMask = 0;
m_uCurPos = 0;
}
int iRank = 0;
for ( int i=0; i<m_iFields; i++ )
{
iRank += (int)(m_uLCS[i])*m_pWeights[i];
m_uLCS[i] = 0;
}
return USE_BM25 ? tMatch.m_iWeight + iRank*SPH_BM25_SCALE : iRank;
}
};
//////////////////////////////////////////////////////////////////////////
// sph04, proximity + exact boost
template <bool HANDLE_DUPES>
class RankerState_ProximityBM25Exact_T : public ISphExtra
{
public:
bool Init ( int iFields, const int * pWeights, ExtRanker_c * pRanker, CSphString &, DWORD )
{
m_tExactHit.Init ( iFields );
ResetDocFactors();
memset ( m_uLCS, 0, sizeof(m_uLCS) );
m_iExpDelta = -INT_MAX;
m_iLastHitPos = -1;
m_uMinExpPos = 0;
m_iFields = iFields;
m_pWeights = pWeights;
// tricky bit
// in expr and export rankers, this gets handled by the overridden (!) SetQwordsIDF()
// but in all the other ones, we need this, because SetQwordsIDF() won't touch the state by default
// FIXME? this is actually MaxUniqueQpos, queries like [foo|foo|foo] might break
m_iMaxQpos = pRanker->m_iMaxQpos;
return true;
}
void Update ( const ExtHit_t * pHlist )
{
DWORD uField = HITMAN::GetField ( pHlist->m_uHitpos );
int iPos = HITMAN::GetPos ( pHlist->m_uHitpos );
DWORD uPosWithField = HITMAN::GetPosWithField ( pHlist->m_uHitpos );
if_const ( !HANDLE_DUPES )
{
// update LCS
int iDelta = uPosWithField - pHlist->m_uQuerypos;
if ( iDelta==m_iExpDelta )
{
if ( (int)uPosWithField>m_iLastHitPos )
m_uCurLCS = (BYTE)( m_uCurLCS + pHlist->m_uWeight );
if ( HITMAN::IsEnd ( pHlist->m_uHitpos ) && (int)pHlist->m_uQuerypos==m_iMaxQpos && iPos==m_iMaxQpos )
m_tExactHit.BitSet ( uField );
} else
{
if ( (int)uPosWithField>m_iLastHitPos )
m_uCurLCS = BYTE(pHlist->m_uWeight);
if ( iPos==1 && HITMAN::IsEnd ( pHlist->m_uHitpos ) && m_iMaxQpos==1 )
m_tExactHit.BitSet ( uField );
}
if ( m_uCurLCS>m_uLCS[uField] )
m_uLCS[uField] = m_uCurLCS;
m_iExpDelta = iDelta + pHlist->m_uSpanlen - 1;
m_iLastHitPos = uPosWithField;
}
else
{
// reset accumulated data from previous field
if ( (DWORD)HITMAN::GetField ( m_uCurPos )!=uField )
{
m_uCurPos = 0;
m_uLcsTailPos = 0;
m_uCurQposMask = 0;
m_uCurLCS = 0;
}
if ( (DWORD)uPosWithField!=m_uCurPos )
{
// next new and shiny hitpos in line
// FIXME!? what do we do with longer spans? keep looking? reset?
if ( m_uCurLCS<2 )
{
m_uLcsTailPos = m_uCurPos;
m_uLcsTailQposMask = m_uCurQposMask;
m_uCurLCS = 1;
}
m_uCurQposMask = 0;
m_uCurPos = uPosWithField;
if ( m_uLCS [ uField ]<pHlist->m_uWeight )
m_uLCS [ uField ] = BYTE ( pHlist->m_uWeight );
}
// add that qpos to current qpos mask (for the current hitpos)
m_uCurQposMask |= ( 1UL << pHlist->m_uQuerypos );
// and check if that results in a better lcs match now
int iDelta = ( m_uCurPos-m_uLcsTailPos );
if ( iDelta && iDelta<32 && ( m_uCurQposMask >> iDelta ) & m_uLcsTailQposMask )
{
// cool, it matched!
m_uLcsTailQposMask = ( 1UL << pHlist->m_uQuerypos ); // our lcs span now ends with a specific qpos
m_uLcsTailPos = m_uCurPos; // and in a specific position
m_uCurLCS = BYTE ( m_uCurLCS+pHlist->m_uWeight ); // and it's longer
m_uCurQposMask = 0; // and we should avoid matching subsequent hits on the same hitpos
// update per-field vector
if ( m_uCurLCS>m_uLCS[uField] )
m_uLCS[uField] = m_uCurLCS;
}
if ( iDelta==m_iExpDelta )
{
if ( HITMAN::IsEnd ( pHlist->m_uHitpos ) && (int)pHlist->m_uQuerypos==m_iMaxQpos && iPos==m_iMaxQpos )
m_tExactHit.BitSet ( uField );
} else
{
if ( iPos==1 && HITMAN::IsEnd ( pHlist->m_uHitpos ) && m_iMaxQpos==1 )
m_tExactHit.BitSet ( uField );
}
m_iExpDelta = iDelta + pHlist->m_uSpanlen - 1;
}
if ( m_uCurLCS>m_uLCS[uField] )
m_uLCS[uField] = m_uCurLCS;
if ( !m_dMinHitPos[uField] )
m_dMinHitPos[uField] = iPos;
m_uMinExpPos = HITMAN::GetPosWithField ( pHlist->m_uHitpos ) + 1;
}
int Finalize ( const CSphMatch & tMatch )
{
m_iExpDelta = -1;
m_iLastHitPos = -1;
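// the per-field rank below is (4*LCS + 2*[first hit at pos 1] + [exact hit]) * weight;
// e.g. LCS=2, field starts with a query word, no exact hit, weight=10
// gives (8+2+0)*10 = 100 (numbers are illustrative)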
int iRank = 0;
for ( int i=0; i<m_iFields; i++ )
{
iRank += (int)( 4*m_uLCS[i] + 2*( m_dMinHitPos[i]==1?1:0 ) + ( m_tExactHit.BitGet(i)?1:0 ))*m_pWeights[i];
m_uLCS[i] = 0;
}
ResetDocFactors();
return tMatch.m_iWeight + iRank*SPH_BM25_SCALE;
}
private:
BYTE m_uLCS[SPH_MAX_FIELDS];
int m_dMinHitPos[SPH_MAX_FIELDS];
BYTE m_uCurLCS;
int m_iExpDelta;
int m_iLastHitPos;
DWORD m_uMinExpPos;
int m_iFields;
const int * m_pWeights;
DWORD m_uCurPos;
DWORD m_uLcsTailPos;
DWORD m_uLcsTailQposMask;
DWORD m_uCurQposMask;
int m_iMaxQpos;
CSphBitvec m_tExactHit;
void ResetDocFactors()
{
// OPTIMIZE? quick full wipe? (using dwords/sse/whatever)
m_uCurLCS = 0;
if_const ( HANDLE_DUPES )
{
m_uCurPos = 0;
m_uLcsTailPos = 0;
m_uLcsTailQposMask = 0;
m_uCurQposMask = 0;
}
m_tExactHit.Clear();
memset ( m_dMinHitPos, 0, sizeof(m_dMinHitPos) );
}
};
template < bool USE_BM25 >
struct RankerState_ProximityPayload_fn : public RankerState_Proximity_fn<USE_BM25,false>
{
DWORD m_uPayloadRank;
DWORD m_uPayloadMask;
bool Init ( int iFields, const int * pWeights, ExtRanker_c * pRanker, CSphString & sError, DWORD )
{
RankerState_Proximity_fn<USE_BM25,false>::Init ( iFields, pWeights, pRanker, sError, false );
m_uPayloadRank = 0;
m_uPayloadMask = pRanker->m_uPayloadMask;
return true;
}
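// fields flagged in m_uPayloadMask bypass the proximity state entirely:
// each payload hit contributes its in-field position times the field weight,
// e.g. (illustrative) a payload hit at pos 7 in a field with weight 5 adds 35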
void Update ( const ExtHit_t * pHlist )
{
DWORD uField = HITMAN::GetField ( pHlist->m_uHitpos );
if ( ( 1<<uField ) & m_uPayloadMask )
this->m_uPayloadRank += HITMAN::GetPos ( pHlist->m_uHitpos ) * this->m_pWeights[uField];
else
RankerState_Proximity_fn<USE_BM25,false>::Update ( pHlist );
}
int Finalize ( const CSphMatch & tMatch )
{
// the explicit 'this->' is required for dependent-name lookup in templates
this->m_uCurLCS = 0;
this->m_iExpDelta = -1;
this->m_iLastHitPosWithField = -1;
int iRank = (int)m_uPayloadRank;
for ( int i=0; i<this->m_iFields; i++ )
{
// no special care for payload fields as their LCS will be 0 anyway
iRank += (int)(this->m_uLCS[i])*this->m_pWeights[i];
this->m_uLCS[i] = 0;
}
m_uPayloadRank = 0;
return USE_BM25 ? tMatch.m_iWeight + iRank*SPH_BM25_SCALE : iRank;
}
};
//////////////////////////////////////////////////////////////////////////
struct RankerState_MatchAny_fn : public RankerState_Proximity_fn<false,false>
{
int m_iPhraseK;
BYTE m_uMatchMask[SPH_MAX_FIELDS];
bool Init ( int iFields, const int * pWeights, ExtRanker_c * pRanker, CSphString & sError, DWORD )
{
RankerState_Proximity_fn<false,false>::Init ( iFields, pWeights, pRanker, sError, false );
m_iPhraseK = 0;
for ( int i=0; i<iFields; i++ )
m_iPhraseK += pWeights[i] * pRanker->m_iQwords;
memset ( m_uMatchMask, 0, sizeof(m_uMatchMask) );
return true;
}
void Update ( const ExtHit_t * pHlist )
{
RankerState_Proximity_fn<false,false>::Update ( pHlist );
m_uMatchMask [ HITMAN::GetField ( pHlist->m_uHitpos ) ] |= ( 1<<(pHlist->m_uQuerypos-1) );
}
int Finalize ( const CSphMatch & )
{
m_uCurLCS = 0;
m_iExpDelta = -1;
m_iLastHitPosWithField = -1;
int iRank = 0;
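// e.g. (illustrative) with 3 query words and two fields of weight 1 each,
// m_iPhraseK=6; a field matching 2 distinct keywords with LCS=2
// contributes (2 + (2-1)*6)*1 = 8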
for ( int i=0; i<m_iFields; ++i )
{
if ( m_uMatchMask[i] )
iRank += (int)( sphBitCount ( m_uMatchMask[i] ) + ( m_uLCS[i]-1 )*m_iPhraseK )*m_pWeights[i];
m_uMatchMask[i] = 0;
m_uLCS[i] = 0;
}
return iRank;
}
};
//////////////////////////////////////////////////////////////////////////
struct RankerState_Wordcount_fn : public ISphExtra
{
int m_iRank;
const int * m_pWeights;
bool Init ( int, const int * pWeights, ExtRanker_c *, CSphString &, DWORD )
{
m_iRank = 0;
m_pWeights = pWeights;
return true;
}
void Update ( const ExtHit_t * pHlist )
{
m_iRank += m_pWeights [ HITMAN::GetField ( pHlist->m_uHitpos ) ];
}
int Finalize ( const CSphMatch & )
{
int iRes = m_iRank;
m_iRank = 0;
return iRes;
}
};
//////////////////////////////////////////////////////////////////////////
struct RankerState_Fieldmask_fn : public ISphExtra
{
DWORD m_uRank;
bool Init ( int, const int *, ExtRanker_c *, CSphString &, DWORD )
{
m_uRank = 0;
return true;
}
void Update ( const ExtHit_t * pHlist )
{
m_uRank |= 1UL << HITMAN::GetField ( pHlist->m_uHitpos );
}
int Finalize ( const CSphMatch & )
{
DWORD uRes = m_uRank;
m_uRank = 0;
return uRes;
}
};
struct RankerState_Plugin_fn final : public ISphExtra
{
RankerState_Plugin_fn() = default;
~RankerState_Plugin_fn() final
{
assert ( m_pPlugin );
if ( m_pPlugin->m_fnDeinit )
m_pPlugin->m_fnDeinit ( m_pData );
}
bool Init ( int iFields, const int * pWeights, ExtRanker_c * pRanker, CSphString & sError, DWORD )
{
if ( !m_pPlugin->m_fnInit )
return true;
SPH_RANKER_INIT r;
r.num_field_weights = iFields;
r.field_weights = const_cast<int*>(pWeights);
r.options = m_sOptions.cstr();
r.payload_mask = pRanker->m_uPayloadMask;
r.num_query_words = pRanker->m_iQwords;
r.max_qpos = pRanker->m_iMaxQpos;
char sErrorBuf [ SPH_UDF_ERROR_LEN ];
if ( m_pPlugin->m_fnInit ( &m_pData, &r, sErrorBuf )==0 )
return true;
sError = sErrorBuf;
return false;
}
void Update ( const ExtHit_t * p )
{
if ( !m_pPlugin->m_fnUpdate )
return;
SPH_RANKER_HIT h;
h.doc_id = p->m_tRowID;
h.hit_pos = p->m_uHitpos;
h.query_pos = p->m_uQuerypos;
h.node_pos = p->m_uNodepos;
h.span_length = p->m_uSpanlen;
h.match_length = p->m_uMatchlen;
h.weight = p->m_uWeight;
h.query_pos_mask = p->m_uQposMask;
m_pPlugin->m_fnUpdate ( m_pData, &h );
}
int Finalize ( const CSphMatch & tMatch )
{
// at some point in the future, we might start passing the entire match,
// with blackjack, hookers, attributes, and their schema; but at this point,
// the only sort-of useful part of a match that we are able to push down to
// the ranker plugin is the match weight
return m_pPlugin->m_fnFinalize ( m_pData, tMatch.m_iWeight );
}
private:
void * m_pData = nullptr;
PluginRankerRefPtr_c m_pPlugin;
CSphString m_sOptions;
bool ExtraDataImpl ( ExtraData_e eType, void ** ppResult ) final
{
switch ( eType )
{
case EXTRA_SET_RANKER_PLUGIN: m_pPlugin = *(PluginRankerRefPtr_c*)ppResult; break;
case EXTRA_SET_RANKER_PLUGIN_OPTS: m_sOptions = (char*)ppResult; break;
default: return false;
}
return true;
}
};
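// a minimal sketch of the plugin side that this state drives; function and
// variable names here are illustrative, see sphinxudf.h for the actual
// SPH_RANKER_INIT/SPH_RANKER_HIT definitions:
//
//   int myrank_init ( void ** userdata, SPH_RANKER_INIT * info, char * error )
//   {
//       *userdata = calloc ( 1, sizeof(int) ); // per-query accumulator
//       return 0; // zero means success, as checked in Init() above
//   }
//
//   void myrank_update ( void * userdata, SPH_RANKER_HIT * hit )
//   {
//       *(int*)userdata += hit->weight; // accumulate hit weights
//   }
//
//   int myrank_finalize ( void * userdata, int match_weight )
//   {
//       int res = *(int*)userdata;
//       *(int*)userdata = 0; // reset for the next match
//       return res;
//   }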
//////////////////////////////////////////////////////////////////////////
class FactorPool_c
{
public:
void Prealloc ( int iElementSize, int nElements );
BYTE * Alloc ();
void Free ( BYTE * pPtr );
int GetElementSize() const;
int GetIntElementSize () const;
void AddToHash ( const RowTagged_t & tRow, BYTE * pPacked );
void AddRef ( const RowTagged_t & tRow );
void Release ( const RowTagged_t & tRow );
void Flush ();
bool IsInitialized() const;
SphFactorHash_t * GetHashPtr();
private:
int m_iElementSize = 0;
CSphFixedVector<BYTE> m_dPool { 0 };
SphFactorHash_t m_dHash { 0 };
CSphFreeList m_dFree;
SphFactorHashEntry_t * Find ( const RowTagged_t & tRow ) const;
bool FlushEntry ( SphFactorHashEntry_t * pEntry );
};
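// slot layout sketch: each pool element stores the packed factor blob and an
// intrusive hash entry back to back, so one allocation serves both purposes:
//
//   [ packed factors: m_iElementSize bytes ][ SphFactorHashEntry_t ]
//
// GetIntElementSize() returns the combined slot size; AddToHash() places the
// entry header right past the packed data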
void FactorPool_c::Prealloc ( int iElementSize, int nElements )
{
m_iElementSize = iElementSize;
// large query + index with many fields + max_matches could overflow an int
// FIXME!!! is it worth failing such a large query at search start unless a special flag is explicitly set?
int64_t iPoolSize = (int64_t)nElements * GetIntElementSize();
m_dPool.Reset ( iPoolSize );
m_dHash.Reset ( nElements );
m_dFree.Reset ( nElements );
memset ( m_dHash.Begin(), 0, sizeof(m_dHash[0])*m_dHash.GetLength() );
}
BYTE * FactorPool_c::Alloc ()
{
int64_t iIndex = m_dFree.Get();
assert ( iIndex>=0 && iIndex*GetIntElementSize()<m_dPool.GetLength64() );
return m_dPool.Begin() + iIndex * GetIntElementSize();
}
void FactorPool_c::Free ( BYTE * pPtr )
{
if ( !pPtr )
return;
assert ( (pPtr-m_dPool.Begin() ) % GetIntElementSize()==0);
assert ( pPtr>=m_dPool.Begin() && pPtr<&( m_dPool.Last() ) );
int iIndex = int ( pPtr-m_dPool.Begin() ) / GetIntElementSize();
m_dFree.Free ( iIndex );
}
int FactorPool_c::GetIntElementSize () const
{
return m_iElementSize+sizeof(SphFactorHashEntry_t);
}
int FactorPool_c::GetElementSize() const
{
return m_iElementSize;
}
void FactorPool_c::AddToHash ( const RowTagged_t & tRow, BYTE * pPacked )
{
auto * pNew = (SphFactorHashEntry_t *)(pPacked+m_iElementSize);
pNew->m_iRefCount = 0;
pNew->m_pPrev = nullptr;
pNew->m_pNext = nullptr;
DWORD uKey = FactorPoolHash ( tRow, m_dHash.GetLength() );
if ( m_dHash[uKey] )
{
SphFactorHashEntry_t * pStart = m_dHash[uKey];
pNew->m_pNext = pStart;
pStart->m_pPrev = pNew;
}
pNew->m_pData = pPacked;
pNew->m_tRow = tRow;
m_dHash[uKey] = pNew;
}
SphFactorHashEntry_t * FactorPool_c::Find ( const RowTagged_t & tRow ) const
{
DWORD uKey = FactorPoolHash ( tRow, m_dHash.GetLength() );
if ( m_dHash[uKey] )
{
SphFactorHashEntry_t * pEntry = m_dHash[uKey];
while ( pEntry )
{
if ( pEntry->m_tRow==tRow )
return pEntry;
pEntry = pEntry->m_pNext;
}
}
return nullptr;
}
void FactorPool_c::AddRef ( const RowTagged_t & tRow )
{
if ( tRow.m_tID==INVALID_ROWID )
return;
SphFactorHashEntry_t * pEntry = Find ( tRow );
if ( pEntry )
pEntry->m_iRefCount++;
}
void FactorPool_c::Release ( const RowTagged_t & tRow )
{
if ( tRow.m_tID==INVALID_ROWID )
return;
SphFactorHashEntry_t * pEntry = Find ( tRow );
if ( pEntry )
{
pEntry->m_iRefCount--;
bool bHead = !pEntry->m_pPrev;
SphFactorHashEntry_t * pNext = pEntry->m_pNext;
if ( FlushEntry ( pEntry ) && bHead )
m_dHash [ FactorPoolHash ( tRow, m_dHash.GetLength() ) ] = pNext;
}
}
bool FactorPool_c::FlushEntry ( SphFactorHashEntry_t * pEntry )
{
assert ( pEntry );
assert ( pEntry->m_iRefCount>=0 );
if ( pEntry->m_iRefCount )
return false;
if ( pEntry->m_pPrev )
pEntry->m_pPrev->m_pNext = pEntry->m_pNext;
if ( pEntry->m_pNext )
pEntry->m_pNext->m_pPrev = pEntry->m_pPrev;
Free ( pEntry->m_pData );
return true;
}
void FactorPool_c::Flush()
{
[[maybe_unused]] int iUsed = 0;
ARRAY_FOREACH ( i, m_dHash )
{
SphFactorHashEntry_t * pEntry = m_dHash[i];
while ( pEntry )
{
SphFactorHashEntry_t * pNext = pEntry->m_pNext;
bool bHead = !pEntry->m_pPrev;
#ifndef NDEBUG
if ( pEntry->m_iRefCount )
iUsed++;
#endif
if ( FlushEntry(pEntry) && bHead )
m_dHash[i] = pNext;
pEntry = pNext;
}
}
assert ( !m_dHash.GetLength() || iUsed+MAX_BLOCK_DOCS<=m_dHash.GetLength() );
}
DWORD FactorPoolHash ( const RowTagged_t & tRow, int iLen )
{
return (DWORD)( ( tRow.m_tID ^ tRow.m_iTag ) % iLen );
}
bool FactorPool_c::IsInitialized() const
{
return !!m_iElementSize;
}
SphFactorHash_t * FactorPool_c::GetHashPtr ()
{
return &m_dHash;
}
//////////////////////////////////////////////////////////////////////////
// EXPRESSION RANKER
//////////////////////////////////////////////////////////////////////////
/// lean hit
/// only stores keyword id and hit position
struct LeanHit_t
{
WORD m_uQuerypos;
Hitpos_t m_uHitpos;
LeanHit_t & operator = ( const ExtHit_t & rhs )
{
m_uQuerypos = rhs.m_uQuerypos;
m_uHitpos = rhs.m_uHitpos;
return *this;
}
};
/// ranker state that computes weight dynamically based on user supplied expression (formula)
template < bool NEED_PACKEDFACTORS = false, bool HANDLE_DUPES = false >
struct RankerState_Expr_fn : public ISphExtra
{
public:
// per-field and per-document stuff
CSphFixedVector<BYTE> m_uLCS { 0 };
BYTE m_uCurLCS;
DWORD m_uCurPos;
DWORD m_uLcsTailPos;
DWORD m_uLcsTailQposMask;
DWORD m_uCurQposMask;
int m_iExpDelta;
int m_iLastHitPos;
int m_iFields = 0;
const int * m_pWeights = nullptr;
DWORD m_uDocBM25 = 0;
CSphBitvec m_tMatchedFields;
int m_iCurrentField = 0;
CSphFixedVector<DWORD> m_uHitCount { 0 };
CSphFixedVector<DWORD> m_uWordCount { 0 };
CSphFixedVector<float> m_dIDF { 0 };
CSphFixedVector<float> m_dTFIDF { 0 };
CSphFixedVector<float> m_dMinIDF { 0 };
CSphFixedVector<float> m_dMaxIDF { 0 };
CSphFixedVector<float> m_dSumIDF { 0 };
CSphFixedVector<int> m_iMinHitPos { 0 };
CSphFixedVector<int> m_iMinBestSpanPos { 0 };
CSphBitvec m_tExactHit;
CSphBitvec m_tExactOrder;
CSphBitvec m_tKeywords;
DWORD m_uDocWordCount = 0;
CSphFixedVector<int> m_iMaxWindowHits { 0 };
CSphFixedVector<int> m_dTF { 0 }; ///< for bm25a
float m_fDocBM25A = 0.0f; ///< for bm25a
CSphFixedVector<int> m_dFieldTF { 0 }; ///< for bm25f, per-field layout (ie all field0 tfs, then all field1 tfs, etc)
CSphFixedVector<int> m_iMinGaps { 0 }; ///< number of gaps in the minimum matching window
const char * m_sExpr = nullptr;
CSphRefcountedPtr<ISphExpr> m_pExpr;
ESphAttr m_eExprType { SPH_ATTR_NONE };
const CSphSchema * m_pSchema = nullptr;
CSphAttrLocator m_tFieldLensLoc;
float m_fAvgDocLen = 0.0f;
const int64_t * m_pFieldLens = nullptr;
int64_t m_iTotalDocuments = 0;
float m_fParamK1 = 1.2f;
float m_fParamB = 0.75f;
int m_iMaxQpos = 65535; ///< among all words, including dupes
CSphFixedVector<WORD> m_dTermDupes { 0 };
CSphFixedVector<Hitpos_t> m_dTermsHit { 0 };
CSphBitvec m_tHasMultiQpos;
int m_uLastSpanStart = 0;
FactorPool_c m_tFactorPool;
int m_iPoolMatchCapacity = 0;
int m_iMatchTag = 0;
// per-query stuff
int m_iMaxLCS = 0;
int m_iQueryWordCount = 0;
public:
// internal state, and factor settings
// max_window_hits(n)
CSphVector<DWORD> m_dWindow;
int m_iWindowSize = 1;
// min_gaps
int m_iHaveMinWindow = 0; ///< whether to compute minimum matching window, and over how many query words
int m_iMinWindowWords = 0; ///< how many unique words have we seen so far
CSphVector<LeanHit_t> m_dMinWindowHits; ///< current minimum matching window candidate hits
CSphFixedVector<int> m_dMinWindowCounts { 0 }; ///< maps querypos indexes to number of occurrences in m_dMinWindowHits
// exact_order
int m_iLastField = 0;
int m_iLastQuerypos = 0;
int m_iExactOrderWords = 0;
// LCCS and Weighted LCCS
CSphFixedVector<BYTE> m_dLCCS { 0 };
CSphFixedVector<float> m_dWLCCS { 0 };
CSphFixedVector<WORD> m_dNextQueryPos { 0 }; ///< word positions might have gaps due to stop-words
WORD m_iQueryPosLCCS = 0;
int m_iHitPosLCCS = 0;
BYTE m_iLenLCCS = 0;
float m_fWeightLCCS = 0.0f;
// ATC
#define XRANK_ATC_WINDOW_LEN 10
#define XRANK_ATC_BUFFER_LEN 30
#define XRANK_ATC_DUP_DIV 0.25f
#define XRANK_ATC_EXP 1.75f
struct AtcHit_t
{
int m_iHitpos = 0;
WORD m_uQuerypos = 65535;
};
AtcHit_t m_dAtcHits[XRANK_ATC_BUFFER_LEN]; ///< ATC hits ring buffer
int m_iAtcHitStart = 0; ///< hits start at ring buffer
int m_iAtcHitCount = 0; ///< hits amount in buffer
CSphFixedVector<float> m_dAtcTerms { 0 }; ///< per-word ATC
CSphBitvec m_dAtcProcessedTerms; ///< temporary processed mask
DWORD m_uAtcField = 0; ///< currently processed field
CSphFixedVector<float> m_dAtc { 0 }; ///< ATC per-field values
bool m_bAtcHeadProcessed = false; ///< flag for hits from buffer start to window start
bool m_bHaveAtc = false; ///< calculate ATC?
bool m_bWantAtc = false;
void UpdateATC ( bool bFlushField );
float TermTC ( int iTerm, bool bLeft );
public:
bool Init ( int iFields, const int * pWeights, ExtRanker_T<true> * pRanker, CSphString & sError, DWORD uFactorFlags );
void Update ( const ExtHit_t * pHlist );
int Finalize ( const CSphMatch & tMatch );
bool IsTermSkipped ( int iTerm );
public:
/// setup per-keyword data needed to compute the factors
/// (namely IDFs, counts, masks etc)
/// WARNING, CALLED EVEN BEFORE INIT()!
void SetQwords ( const ExtQwordsHash_t & hQwords )
{
m_dIDF.Reset ( m_iMaxQpos+1 ); // [MaxUniqQpos, MaxQpos] range will be all 0, but anyway
m_dIDF.Fill ( 0.0f );
m_dTF.Reset ( m_iMaxQpos+1 );
m_dTF.Fill ( 0 );
m_dMinWindowCounts.Reset ( m_iMaxQpos+1 );
m_dMinWindowCounts.Fill ( 0 );
m_iQueryWordCount = 0;
m_tKeywords.Init ( m_iMaxQpos+1 ); // will not be tracking dupes
bool bGotExpanded = false;
CSphVector<WORD> dQueryPos;
dQueryPos.Reserve ( m_iMaxQpos+1 );
for ( const auto& tQword : hQwords )
{
// tricky bit
// for query_word_count, we only want to count keywords that are not (!) excluded by the query
// that is, in (aa NOT bb) case, we want a value of 1, not 2
// there might be trailing excluded terms; these do not affect MaxQpos
const ExtQword_t & dCur = tQword.second;
const int iQueryPos = dCur.m_iQueryPos;
if ( dCur.m_bExcluded )
continue;
bool bQposUsed = m_tKeywords.BitGet ( iQueryPos );
bGotExpanded |= bQposUsed;
m_iQueryWordCount += ( bQposUsed ? 0 : 1 ); // count only one term at that position
m_tKeywords.BitSet ( iQueryPos ); // just to assert at early stage!
m_dIDF [ iQueryPos ] += dCur.m_fIDF;
m_dTF [ iQueryPos ]++;
if ( !bQposUsed )
dQueryPos.Add ( (WORD)iQueryPos );
}
// FIXME!!! average IDF for expanded terms (aot morphology or dict=keywords)
if ( bGotExpanded )
ARRAY_FOREACH ( i, m_dTF )
{
if ( m_dTF[i]>1 )
m_dIDF[i] /= m_dTF[i];
}
m_dTF.Fill ( 0 );
// set next term position for current term in query (degenerates to +1 in the simplest case)
dQueryPos.Sort();
m_dNextQueryPos.Reset ( m_iMaxQpos+1 );
m_dNextQueryPos.Fill ( (WORD)-1 ); // WORD_MAX filler
for ( int i=0; i<dQueryPos.GetLength()-1; i++ )
{
WORD iCutPos = dQueryPos[i];
WORD iNextPos = dQueryPos[i+1];
m_dNextQueryPos[iCutPos] = iNextPos;
}
}
void SetTermDupes ( const ExtQwordsHash_t & hQwords, int iMaxQpos, const ExtNode_i * pRoot )
{
if ( !pRoot )
return;
m_dTermsHit.Reset ( iMaxQpos + 1 );
m_dTermsHit.Fill ( EMPTY_HIT );
m_tHasMultiQpos.Init ( iMaxQpos+1 );
m_dTermDupes.Reset ( iMaxQpos+1 );
m_dTermDupes.Fill ( (WORD)-1 );
CSphVector<TermPos_t> dTerms;
dTerms.Reserve ( iMaxQpos );
pRoot->GetTerms ( hQwords, dTerms );
// reset excluded for all duplicates
ARRAY_FOREACH ( i, dTerms )
{
WORD uAtomPos = dTerms[i].m_uAtomPos;
WORD uQpos = dTerms[i].m_uQueryPos;
if ( uAtomPos!=uQpos )
{
m_tHasMultiQpos.BitSet ( uAtomPos );
m_tHasMultiQpos.BitSet ( uQpos );
}
m_tKeywords.BitSet ( uAtomPos );
m_tKeywords.BitSet ( uQpos );
m_dTermDupes[uAtomPos] = uQpos;
// fill missed idf for dups
if ( fabs ( m_dIDF[uAtomPos] )<=1e-6 )
m_dIDF[uAtomPos] = m_dIDF[uQpos];
}
}
/// finalize per-document factors that, well, need finalization
void FinalizeDocFactors ( const CSphMatch & tMatch )
{
m_uDocBM25 = tMatch.m_iWeight;
for ( int i=0; i<m_iFields; i++ )
{
m_uWordCount[i] = sphBitCount ( m_uWordCount[i] );
if ( m_dMinIDF[i] > m_dMaxIDF[i] )
m_dMinIDF[i] = m_dMaxIDF[i] = 0; // must be FLT_MAX vs -FLT_MAX, aka no hits
}
m_uDocWordCount = sphBitCount ( m_uDocWordCount );
// compute real BM25
// with blackjack, and hookers, and field lengths, and parameters
//
// canonical idf = log ( (N-n+0.5) / (n+0.5) )
// sphinx idf = log ( (N-n+1) / n )
// and we also downscale our idf by 1/log(N+1) to map it into [-0.5, 0.5] range
// compute document length
float dl = 0; // OPTIMIZE? could precompute and store total dl in attrs, but at a storage cost
CSphAttrLocator tLoc = m_tFieldLensLoc;
if ( tLoc.m_iBitOffset>=0 )
for ( int i=0; i<m_iFields; i++ )
{
dl += tMatch.GetAttr ( tLoc );
tLoc.m_iBitOffset += 32;
}
// compute BM25A (one value per document)
m_fDocBM25A = 0.0f;
for ( int iWord=1; iWord<=m_iMaxQpos; iWord++ )
{
if ( IsTermSkipped ( iWord ) )
continue;
auto tf = (float)m_dTF[iWord]; // OPTIMIZE? remove this vector, hook into m_uMatchHits somehow?
float idf = m_dIDF[iWord];
m_fDocBM25A += tf / (tf + m_fParamK1*(1 - m_fParamB + m_fParamB*dl/m_fAvgDocLen)) * idf;
}
m_fDocBM25A += 0.5f; // map to [0..1] range
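// illustrative numbers (not from the code): with k1=1.2, b=0.75 and dl equal
// to the average doc length, the normalizer is 1, so a term with tf=2 and
// idf=0.3 contributes 2/(2+1.2)*0.3 ~ 0.19 before the +0.5 shift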
}
/// reset per-document factors, prepare for the next document
void ResetDocFactors()
{
// OPTIMIZE? quick full wipe? (using dwords/sse/whatever)
m_uCurLCS = 0;
if_const ( HANDLE_DUPES )
{
m_uCurPos = 0;
m_uLcsTailPos = 0;
m_uLcsTailQposMask = 0;
m_uCurQposMask = 0;
m_uLastSpanStart = 0;
}
m_iExpDelta = -1;
m_iLastHitPos = -1;
for ( int i=0; i<m_iFields; i++ )
{
m_uLCS[i] = 0;
m_uHitCount[i] = 0;
m_uWordCount[i] = 0;
m_dMinIDF[i] = FLT_MAX;
m_dMaxIDF[i] = -FLT_MAX;
m_dSumIDF[i] = 0;
m_dTFIDF[i] = 0;
m_iMinHitPos[i] = 0;
m_iMinBestSpanPos[i] = 0;
m_iMaxWindowHits[i] = 0;
m_iMinGaps[i] = 0;
m_dAtc[i] = 0.0f;
}
m_dTF.Fill ( 0 );
m_dFieldTF.Fill ( 0 ); // OPTIMIZE? make conditional?
m_tMatchedFields.Clear();
m_tExactHit.Clear();
m_tExactOrder.Clear();
m_uDocWordCount = 0;
m_dWindow.Resize ( 0 );
m_fDocBM25A = 0;
m_dMinWindowHits.Resize ( 0 );
m_dMinWindowCounts.Fill ( 0 );
m_iMinWindowWords = 0;
m_iLastField = -1;
m_iLastQuerypos = 0;
m_iExactOrderWords = 0;
m_dAtcTerms.Fill ( 0.0f );
m_iAtcHitStart = 0;
m_iAtcHitCount = 0;
m_uAtcField = 0;
if_const ( HANDLE_DUPES )
m_dTermsHit.Fill ( EMPTY_HIT );
}
void FlushMatches ()
{
m_tFactorPool.Flush ();
}
protected:
inline void UpdateGap ( int iField, int iWords, int iGap )
{
if ( m_iMinWindowWords<iWords || ( m_iMinWindowWords==iWords && m_iMinGaps[iField]>iGap ) )
{
m_iMinGaps[iField] = iGap;
m_iMinWindowWords = iWords;
}
}
void UpdateMinGaps ( const ExtHit_t * pHlist );
void UpdateFreq ( WORD uQpos, DWORD uField );
private:
bool ExtraDataImpl ( ExtraData_e eType, void ** ppResult ) override;
int GetMaxPackedLength();
BYTE * PackFactors();
};
/// extra expression ranker node types
enum ExprRankerNode_e
{
// field level factors
XRANK_LCS,
XRANK_USER_WEIGHT,
XRANK_HIT_COUNT,
XRANK_WORD_COUNT,
XRANK_TF_IDF,
XRANK_MIN_IDF,
XRANK_MAX_IDF,
XRANK_SUM_IDF,
XRANK_MIN_HIT_POS,
XRANK_MIN_BEST_SPAN_POS,
XRANK_EXACT_HIT,
XRANK_EXACT_ORDER,
XRANK_MAX_WINDOW_HITS,
XRANK_MIN_GAPS,
XRANK_LCCS,
XRANK_WLCCS,
XRANK_ATC,
// document level factors
XRANK_BM25,
XRANK_MAX_LCS,
XRANK_FIELD_MASK,
XRANK_QUERY_WORD_COUNT,
XRANK_DOC_WORD_COUNT,
XRANK_BM25A,
XRANK_BM25F,
// field aggregation functions
XRANK_SUM,
XRANK_TOP
};
/// generic field factor
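/// reads m_pData[*m_pIndex]; m_pIndex points at the ranker state's m_iCurrentField,
/// which the SUM()/TOP() aggregation nodes advance while iterating matched fields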
template < typename T >
struct Expr_FieldFactor_c : public Expr_NoLocator_c
{
const int * m_pIndex;
const T * m_pData;
Expr_FieldFactor_c ( const int * pIndex, const T * pData )
: m_pIndex ( pIndex )
, m_pData ( pData )
{}
float Eval ( const CSphMatch & ) const final
{
return (float) m_pData [ *m_pIndex ];
}
int IntEval ( const CSphMatch & ) const final
{
return (int) m_pData [ *m_pIndex ];
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr* Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; //new Expr_FieldFactor_c ( *this );
}
private:
Expr_FieldFactor_c ( const Expr_FieldFactor_c& rhs )
: m_pIndex ( rhs.m_pIndex )
, m_pData ( rhs.m_pData )
{}
};
/// bitmask field factor specialization
template<>
struct Expr_FieldFactor_c<bool> : public Expr_NoLocator_c
{
const int * m_pIndex;
const DWORD * m_pData;
Expr_FieldFactor_c ( const int * pIndex, const DWORD * pData )
: m_pIndex ( pIndex )
, m_pData ( pData )
{}
float Eval ( const CSphMatch & ) const final
{
return (float)( (*m_pData) >> (*m_pIndex) );
}
int IntEval ( const CSphMatch & ) const final
{
return (int)( (*m_pData) >> (*m_pIndex) );
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
};
/// generic per-document int factor
struct Expr_IntPtr_c : public Expr_NoLocator_c
{
DWORD * m_pVal;
explicit Expr_IntPtr_c ( DWORD * pVal )
: m_pVal ( pVal )
{}
float Eval ( const CSphMatch & ) const final
{
return (float)*m_pVal;
}
int IntEval ( const CSphMatch & ) const final
{
return (int)*m_pVal;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; //new Expr_IntPtr_c ( *this );
}
private:
Expr_IntPtr_c ( const Expr_IntPtr_c& rhs ) : m_pVal ( rhs.m_pVal ) {}
};
/// per-document field mask factor
struct Expr_FieldMask_c : public Expr_NoLocator_c
{
const CSphBitvec & m_tFieldMask;
explicit Expr_FieldMask_c ( const CSphBitvec & tFieldMask )
: m_tFieldMask ( tFieldMask )
{}
float Eval ( const CSphMatch & ) const final
{
return (float)*m_tFieldMask.Begin();
}
int IntEval ( const CSphMatch & ) const final
{
return (int)*m_tFieldMask.Begin();
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; //new Expr_FieldMask_c ( *this );
}
private:
Expr_FieldMask_c ( const Expr_FieldMask_c& rhs ) : m_tFieldMask ( rhs.m_tFieldMask ) {}
};
/// bitvec field factor specialization
template<>
struct Expr_FieldFactor_c<CSphBitvec> : public Expr_NoLocator_c
{
const int * m_pIndex;
const CSphBitvec & m_tField;
Expr_FieldFactor_c ( const int * pIndex, const CSphBitvec & tField )
: m_pIndex ( pIndex )
, m_tField ( tField )
{}
float Eval ( const CSphMatch & ) const final
{
return (float)( m_tField.BitGet ( *m_pIndex ) );
}
int IntEval ( const CSphMatch & ) const final
{
return (int)( m_tField.BitGet ( *m_pIndex ) );
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; // new Expr_FieldFactor_c<CSphBitvec> ( *this );
}
private:
Expr_FieldFactor_c<CSphBitvec> ( const Expr_FieldFactor_c<CSphBitvec>& rhs )
: m_pIndex ( rhs.m_pIndex ), m_tField ( rhs.m_tField ) {}
};
/// generic per-document float factor
struct Expr_FloatPtr_c : public Expr_NoLocator_c
{
float * m_pVal;
explicit Expr_FloatPtr_c ( float * pVal )
: m_pVal ( pVal )
{}
float Eval ( const CSphMatch & ) const final
{
return (float)*m_pVal;
}
int IntEval ( const CSphMatch & ) const final
{
return (int)*m_pVal;
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; // new Expr_FloatPtr_c ( *this );
}
private:
Expr_FloatPtr_c ( const Expr_FloatPtr_c& rhs )
: m_pVal ( rhs.m_pVal )
{}
};
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
struct Expr_BM25F_T : public Expr_NoLocator_c
{
RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * m_pState;
float m_fK1;
float m_fB;
CSphFixedVector<int> m_dWeights { 0 }; ///< per field weights
CSphFixedVector<float> m_dAvgDocFieldLens { 0 }; ///< per field avg lengths
mutable CSphFixedVector<int> m_dFieldLens { 0 }; ///< per field lengths
int m_iWeightMax; ///< the largest field weight
explicit Expr_BM25F_T ( RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * pState, float k1, float b, ISphExpr * pFieldWeights )
{
// bind k1, b
m_pState = pState;
m_fK1 = k1;
m_fB = b;
m_dWeights.Reset ( pState->m_iFields );
m_dAvgDocFieldLens.Reset ( pState->m_iFields );
m_dFieldLens.Reset ( pState->m_iFields );
// bind weights
m_iWeightMax = 1;
m_dWeights.Fill ( 1 );
if ( pFieldWeights )
{
auto pMapArg = ( Expr_MapArg_c * ) pFieldWeights;
VecTraits_T<CSphNamedVariant> dOpts ( pMapArg->m_pValues, pMapArg->m_iCount );
for ( const auto& dOpt : dOpts )
{
// FIXME? report errors if field was not found?
if ( !dOpt.m_sValue.IsEmpty() )
continue; // weights must be int, not string
const CSphString & sField = dOpt.m_sKey;
int iField = pState->m_pSchema->GetFieldIndex ( sField.cstr() );
if ( iField>=0 )
{
m_dWeights[iField] = dOpt.m_iValue;
if ( dOpt.m_iValue > m_iWeightMax )
m_iWeightMax = dOpt.m_iValue;
}
}
}
// compute avg length per field
m_dAvgDocFieldLens.Fill ( 0.0f );
if ( m_pState->m_pFieldLens )
for ( int i=0; i<m_pState->m_iFields; i++ )
m_dAvgDocFieldLens[i] = m_pState->m_pFieldLens[i] / m_pState->m_iTotalDocuments; // FIXME? the total of documents with a non-empty field value is actually needed here
}
float Eval ( const CSphMatch & tMatch ) const final
{
// compute document field lengths
// OPTIMIZE? could precompute and store total dl in attrs, but at a storage cost
// OPTIMIZE? could at least share between multiple BM25F instances, if there are many
CSphAttrLocator tLoc = m_pState->m_tFieldLensLoc;
m_dFieldLens.Fill ( 0 );
if ( tLoc.m_iBitOffset>=0 )
for ( int i=0; i<m_pState->m_iFields; i++ )
{
m_dFieldLens[i] = tMatch.GetAttr ( tLoc );
tLoc.m_iBitOffset += 32;
}
// compute (the current instance of) BM25F
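// each matched term in each field contributes the classic BM25F summand
// (w_f/w_max) * idf * tf*(k1+1) / ( tf + k1*(1 - b + b*dl_f/avgdl_f) );
// e.g. tf=3, k1=1.2, b=0.75, dl_f==avgdl_f and w_f==w_max yield idf * 6.6/4.2 ~ 1.571*idf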
float fRes = 0.0f;
for ( int iWord=1; iWord<=m_pState->m_iMaxQpos; iWord++ )
{
if ( m_pState->IsTermSkipped ( iWord ) )
continue;
// compute weighted TF
float fIDF = m_pState->m_dIDF[iWord]; // FIXME? zeroed out for dupes!
for ( int i=0; i<m_pState->m_iFields; i++ )
{
int iFieldTF = m_pState->m_dFieldTF [ iWord + i*(1+m_pState->m_iMaxQpos) ];
if ( m_dAvgDocFieldLens[i]>0.0f && iFieldTF>0 )
fRes += (m_dWeights[i] / (float)m_iWeightMax) * fIDF * iFieldTF * (m_fK1 + 1.0f) / (iFieldTF + m_fK1 * (1.0f - m_fB + m_fB * m_dFieldLens[i] / m_dAvgDocFieldLens[i]) );
}
}
return fRes + 0.5f; // map to [0..1] range
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; // new Expr_BM25F_T ( *this );
}
private:
Expr_BM25F_T ( const Expr_BM25F_T& rhs )
: m_pState ( rhs.m_pState )
, m_fK1 ( rhs.m_fK1 )
, m_fB ( rhs.m_fB )
, m_dWeights ( rhs.m_dWeights )
, m_dAvgDocFieldLens ( rhs.m_dAvgDocFieldLens )
, m_dFieldLens ( rhs.m_dFieldLens )
, m_iWeightMax ( rhs.m_iWeightMax )
{}
};
/// function that sums sub-expressions over matched fields
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
struct Expr_Sum_T : public ISphExpr
{
RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * m_pState;
CSphRefcountedPtr<ISphExpr> m_pArg;
Expr_Sum_T ( RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * pState, ISphExpr * pArg )
: m_pState ( pState )
, m_pArg ( pArg )
{
SafeAddRef ( pArg );
}
float Eval ( const CSphMatch & tMatch ) const final
{
m_pState->m_iCurrentField = 0;
float fRes = 0;
const CSphBitvec & tFields = m_pState->m_tMatchedFields;
int iBits = tFields.BitCount();
while ( iBits )
{
if ( tFields.BitGet ( m_pState->m_iCurrentField ) )
{
fRes += m_pArg->Eval ( tMatch );
iBits--;
}
m_pState->m_iCurrentField++;
}
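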
return fRes;
}
int IntEval ( const CSphMatch & tMatch ) const final
{
m_pState->m_iCurrentField = 0;
int iRes = 0;
const CSphBitvec & tFields = m_pState->m_tMatchedFields;
int iBits = tFields.BitCount();
while ( iBits )
{
if ( tFields.BitGet ( m_pState->m_iCurrentField ) )
{
iRes += m_pArg->IntEval ( tMatch );
iBits--;
}
m_pState->m_iCurrentField++;
}
return iRes;
}
void FixupLocator ( const ISphSchema * /*pOldSchema*/, const ISphSchema * /*pNewSchema*/ ) final
{
assert ( 0 && "ranker expressions in filters" );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
assert ( m_pArg );
m_pArg->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; // new Expr_Sum_T ( *this );
}
private:
Expr_Sum_T ( const Expr_Sum_T& rhs )
: m_pState ( rhs.m_pState ) // fixme!
, m_pArg ( SafeClone (rhs.m_pArg ) ) {}
};
/// aggregate max over matched fields
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
struct Expr_Top_T : public ISphExpr
{
RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * m_pState;
CSphRefcountedPtr<ISphExpr> m_pArg;
Expr_Top_T ( RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * pState, ISphExpr * pArg )
: m_pState ( pState )
, m_pArg ( pArg )
{
SafeAddRef ( pArg );
}
float Eval ( const CSphMatch & tMatch ) const final
{
m_pState->m_iCurrentField = 0;
float fRes = -FLT_MAX; // the most negative float (note: FLT_MIN is the smallest positive value, not a minimum)
const CSphBitvec & tFields = m_pState->m_tMatchedFields;
int iBits = tFields.BitCount();
while ( iBits )
{
if ( tFields.BitGet ( m_pState->m_iCurrentField ) )
{
fRes = Max ( fRes, m_pArg->Eval ( tMatch ) );
iBits--;
}
m_pState->m_iCurrentField++;
}
return fRes;
}
int IntEval ( const CSphMatch & tMatch ) const final
{
m_pState->m_iCurrentField = 0;
int iRes = INT_MIN;
const CSphBitvec & tFields = m_pState->m_tMatchedFields;
int iBits = tFields.BitCount();
while ( iBits )
{
if ( tFields.BitGet ( m_pState->m_iCurrentField ) )
{
iRes = Max ( iRes, m_pArg->IntEval ( tMatch ) );
iBits--;
}
m_pState->m_iCurrentField++;
}
return iRes;
}
void FixupLocator ( const ISphSchema * /*pOldSchema*/, const ISphSchema * /*pNewSchema*/ ) final
{
assert ( 0 && "ranker expressions in filters" );
}
void Command ( ESphExprCommand eCmd, void * pArg ) final
{
assert ( m_pArg );
m_pArg->Command ( eCmd, pArg );
}
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; // new Expr_Top_T ( *this );
}
private:
Expr_Top_T ( const Expr_Top_T& rhs )
: m_pState ( rhs.m_pState ) // fixme!
, m_pArg ( SafeClone ( rhs.m_pArg ) ) {}
};
// FIXME! cut/pasted from sphinxexpr; remove dupe
struct Expr_GetIntConst_Rank_c : public Expr_NoLocator_c
{
int m_iValue;
explicit Expr_GetIntConst_Rank_c ( int iValue ) : m_iValue ( iValue ) {}
float Eval ( const CSphMatch & ) const final { return (float) m_iValue; } // no assert() here because the generic float Eval() needs to work even on an int-evaluator tree
int IntEval ( const CSphMatch & ) const final { return m_iValue; }
int64_t Int64Eval ( const CSphMatch & ) const final { return m_iValue; }
uint64_t GetHash ( const ISphSchema &, uint64_t, bool & ) final
{
assert ( 0 && "ranker expressions in filters" );
return 0;
}
ISphExpr * Clone () const final
{
assert ( 0 && "cloning ranker expressions is not expected now" );
return nullptr; // new Expr_GetIntConst_Rank_c ( *this );
}
private:
Expr_GetIntConst_Rank_c ( const Expr_GetIntConst_Rank_c& rhs ) : m_iValue ( rhs.m_iValue ) {}
};
/// hook that exposes field-level factors, document-level factors, and matched field SUM() function to generic expressions
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
class ExprRankerHook_T : public ISphExprHook
{
public:
RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * m_pState;
const char * m_sCheckError = nullptr;
bool m_bCheckInFieldAggr = false;
public:
explicit ExprRankerHook_T ( RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES> * pState )
: m_pState ( pState )
{}
int IsKnownIdent ( const char * sIdent ) const final
{
// OPTIMIZE? hash this some nice long winter night?
if ( !strcasecmp ( sIdent, "lcs" ) )
return XRANK_LCS;
if ( !strcasecmp ( sIdent, "user_weight" ) )
return XRANK_USER_WEIGHT;
if ( !strcasecmp ( sIdent, "hit_count" ) )
return XRANK_HIT_COUNT;
if ( !strcasecmp ( sIdent, "word_count" ) )
return XRANK_WORD_COUNT;
if ( !strcasecmp ( sIdent, "tf_idf" ) )
return XRANK_TF_IDF;
if ( !strcasecmp ( sIdent, "min_idf" ) )
return XRANK_MIN_IDF;
if ( !strcasecmp ( sIdent, "max_idf" ) )
return XRANK_MAX_IDF;
if ( !strcasecmp ( sIdent, "sum_idf" ) )
return XRANK_SUM_IDF;
if ( !strcasecmp ( sIdent, "min_hit_pos" ) )
return XRANK_MIN_HIT_POS;
if ( !strcasecmp ( sIdent, "min_best_span_pos" ) )
return XRANK_MIN_BEST_SPAN_POS;
if ( !strcasecmp ( sIdent, "exact_hit" ) )
return XRANK_EXACT_HIT;
if ( !strcasecmp ( sIdent, "exact_order" ) )
return XRANK_EXACT_ORDER;
if ( !strcasecmp ( sIdent, "bm25" ) )
return XRANK_BM25;
if ( !strcasecmp ( sIdent, "max_lcs" ) )
return XRANK_MAX_LCS;
if ( !strcasecmp ( sIdent, "field_mask" ) )
return XRANK_FIELD_MASK;
if ( !strcasecmp ( sIdent, "query_word_count" ) )
return XRANK_QUERY_WORD_COUNT;
if ( !strcasecmp ( sIdent, "doc_word_count" ) )
return XRANK_DOC_WORD_COUNT;
if ( !strcasecmp ( sIdent, "min_gaps" ) )
return XRANK_MIN_GAPS;
if ( !strcasecmp ( sIdent, "lccs" ) )
return XRANK_LCCS;
if ( !strcasecmp ( sIdent, "wlccs" ) )
return XRANK_WLCCS;
if ( !strcasecmp ( sIdent, "atc" ) )
return XRANK_ATC;
return -1;
}
int IsKnownFunc ( const char * sFunc ) const final
{
if ( !strcasecmp ( sFunc, "sum" ) )
return XRANK_SUM;
if ( !strcasecmp ( sFunc, "top" ) )
return XRANK_TOP;
if ( !strcasecmp ( sFunc, "max_window_hits" ) )
return XRANK_MAX_WINDOW_HITS;
if ( !strcasecmp ( sFunc, "bm25a" ) )
return XRANK_BM25A;
if ( !strcasecmp ( sFunc, "bm25f" ) )
return XRANK_BM25F;
return -1;
}
ISphExpr * CreateNode ( int iID, ISphExpr * _pLeft, const ISphSchema *, ESphEvalStage *, bool *, CSphString & ) final
{
SafeAddRef ( _pLeft );
CSphRefcountedPtr<ISphExpr> pLeft ( _pLeft );
int * pCF = &m_pState->m_iCurrentField; // just a shortcut
switch ( iID )
{
case XRANK_LCS: return new Expr_FieldFactor_c<BYTE> ( pCF, m_pState->m_uLCS.Begin() );
case XRANK_USER_WEIGHT: return new Expr_FieldFactor_c<int> ( pCF, m_pState->m_pWeights );
case XRANK_HIT_COUNT: return new Expr_FieldFactor_c<DWORD> ( pCF, m_pState->m_uHitCount.Begin() );
case XRANK_WORD_COUNT: return new Expr_FieldFactor_c<DWORD> ( pCF, m_pState->m_uWordCount.Begin() );
case XRANK_TF_IDF: return new Expr_FieldFactor_c<float> ( pCF, m_pState->m_dTFIDF.Begin() );
case XRANK_MIN_IDF: return new Expr_FieldFactor_c<float> ( pCF, m_pState->m_dMinIDF.Begin() );
case XRANK_MAX_IDF: return new Expr_FieldFactor_c<float> ( pCF, m_pState->m_dMaxIDF.Begin() );
case XRANK_SUM_IDF: return new Expr_FieldFactor_c<float> ( pCF, m_pState->m_dSumIDF.Begin() );
case XRANK_MIN_HIT_POS: return new Expr_FieldFactor_c<int> ( pCF, m_pState->m_iMinHitPos.Begin() );
case XRANK_MIN_BEST_SPAN_POS: return new Expr_FieldFactor_c<int> ( pCF, m_pState->m_iMinBestSpanPos.Begin() );
case XRANK_EXACT_HIT: return new Expr_FieldFactor_c<CSphBitvec> ( pCF, m_pState->m_tExactHit );
case XRANK_EXACT_ORDER: return new Expr_FieldFactor_c<CSphBitvec> ( pCF, m_pState->m_tExactOrder );
case XRANK_MAX_WINDOW_HITS:
{
CSphMatch tDummy;
m_pState->m_iWindowSize = pLeft->IntEval ( tDummy ); // must be constant; checked in GetReturnType()
return new Expr_FieldFactor_c<int> ( pCF, m_pState->m_iMaxWindowHits.Begin() );
}
case XRANK_MIN_GAPS: return new Expr_FieldFactor_c<int> ( pCF, m_pState->m_iMinGaps.Begin() );
case XRANK_LCCS: return new Expr_FieldFactor_c<BYTE> ( pCF, m_pState->m_dLCCS.Begin() );
case XRANK_WLCCS: return new Expr_FieldFactor_c<float> ( pCF, m_pState->m_dWLCCS.Begin() );
case XRANK_ATC:
m_pState->m_bWantAtc = true;
return new Expr_FieldFactor_c<float> ( pCF, m_pState->m_dAtc.Begin() );
case XRANK_BM25: return new Expr_IntPtr_c ( &m_pState->m_uDocBM25 );
case XRANK_MAX_LCS: return new Expr_GetIntConst_Rank_c ( m_pState->m_iMaxLCS );
case XRANK_FIELD_MASK: return new Expr_FieldMask_c ( m_pState->m_tMatchedFields );
case XRANK_QUERY_WORD_COUNT: return new Expr_GetIntConst_Rank_c ( m_pState->m_iQueryWordCount );
case XRANK_DOC_WORD_COUNT: return new Expr_IntPtr_c ( &m_pState->m_uDocWordCount );
case XRANK_BM25A:
{
// exprs we'll evaluate here must be constant; that is checked in GetReturnType()
// so having a dummy match with no data works alright
assert ( pLeft->IsArglist() );
CSphMatch tDummy;
m_pState->m_fParamK1 = pLeft->GetArg(0)->Eval ( tDummy );
m_pState->m_fParamB = pLeft->GetArg(1)->Eval ( tDummy );
m_pState->m_fParamK1 = Max ( m_pState->m_fParamK1, 0.001f );
m_pState->m_fParamB = Min ( Max ( m_pState->m_fParamB, 0.0f ), 1.0f );
return new Expr_FloatPtr_c ( &m_pState->m_fDocBM25A );
}
case XRANK_BM25F:
{
assert ( pLeft->IsArglist() );
CSphMatch tDummy;
float fK1 = pLeft->GetArg(0)->Eval ( tDummy );
float fB = pLeft->GetArg(1)->Eval ( tDummy );
fK1 = Max ( fK1, 0.001f );
fB = Min ( Max ( fB, 0.0f ), 1.0f );
return new Expr_BM25F_T<NEED_PACKEDFACTORS, HANDLE_DUPES> ( m_pState, fK1, fB, pLeft->GetArg(2) );
}
case XRANK_SUM: return new Expr_Sum_T<NEED_PACKEDFACTORS, HANDLE_DUPES> ( m_pState, pLeft );
case XRANK_TOP: return new Expr_Top_T<NEED_PACKEDFACTORS, HANDLE_DUPES> ( m_pState, pLeft );
default: return nullptr;
}
}
ESphAttr GetIdentType ( int iID ) const final
{
switch ( iID )
{
case XRANK_LCS: // field-level
case XRANK_USER_WEIGHT:
case XRANK_HIT_COUNT:
case XRANK_WORD_COUNT:
case XRANK_MIN_HIT_POS:
case XRANK_MIN_BEST_SPAN_POS:
case XRANK_EXACT_HIT:
case XRANK_EXACT_ORDER:
case XRANK_MAX_WINDOW_HITS:
case XRANK_BM25: // doc-level
case XRANK_MAX_LCS:
case XRANK_FIELD_MASK:
case XRANK_QUERY_WORD_COUNT:
case XRANK_DOC_WORD_COUNT:
case XRANK_MIN_GAPS:
case XRANK_LCCS:
return SPH_ATTR_INTEGER;
case XRANK_TF_IDF:
case XRANK_MIN_IDF:
case XRANK_MAX_IDF:
case XRANK_SUM_IDF:
case XRANK_WLCCS:
case XRANK_ATC:
return SPH_ATTR_FLOAT;
default:
assert ( 0 );
return SPH_ATTR_INTEGER;
}
}
/// helper to check argument types by a signature string (passed in sArgs)
/// every character in the signature describes a type
/// ? = any type
/// i = integer
/// I = integer constant
/// f = float
/// s = scalar (int/float)
/// h = hash
/// the signature can also be preceded by a "c:" modifier that means that all arguments must be constant
/// (note that only '?', 'i', 's', and 'h' are currently handled by the switch below)
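/// e.g. CheckArgtypes ( dArgs, "BM25A", "c:ss", bAllConst, sError ) demands exactly two constant scalar arguments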
bool CheckArgtypes ( const CSphVector<ESphAttr> & dArgs, const char * sFuncname, const char * sArgs, bool bAllConst, CSphString & sError ) const
{
if ( sArgs[0]=='c' && sArgs[1]==':' )
{
if ( !bAllConst )
{
sError.SetSprintf ( "%s() requires constant arguments", sFuncname );
return false;
}
sArgs += 2;
}
auto iLen = (int)strlen ( sArgs );
if ( dArgs.GetLength()!=iLen )
{
sError.SetSprintf ( "%s() requires %d argument(s), not %d", sFuncname, iLen, dArgs.GetLength() );
return false;
}
ARRAY_FOREACH ( i, dArgs )
{
switch ( *sArgs++ )
{
case '?':
break;
case 'i':
if ( dArgs[i]!=SPH_ATTR_INTEGER )
{
sError.SetSprintf ( "argument %d to %s() must be integer", i, sFuncname );
return false;
}
break;
case 's':
if ( dArgs[i]!=SPH_ATTR_INTEGER && dArgs[i]!=SPH_ATTR_FLOAT )
{
sError.SetSprintf ( "argument %d to %s() must be scalar (integer or float)", i, sFuncname );
return false;
}
break;
case 'h':
if ( dArgs[i]!=SPH_ATTR_MAPARG )
{
sError.SetSprintf ( "argument %d to %s() must be a map of constants", i, sFuncname );
return false;
}
break;
default:
assert ( 0 && "unknown signature code" );
break;
}
}
// this is important!
// previous failed checks might have filled sError
// and if anything up the stack checks it, we need an empty message now
sError = "";
return true;
}
ESphAttr GetReturnType ( int iID, const CSphVector<ESphAttr> & dArgs, bool bAllConst, CSphString & sError ) const final
{
switch ( iID )
{
case XRANK_SUM:
if ( !CheckArgtypes ( dArgs, "SUM", "?", bAllConst, sError ) )
return SPH_ATTR_NONE;
return dArgs[0];
case XRANK_TOP:
if ( !CheckArgtypes ( dArgs, "TOP", "?", bAllConst, sError ) )
return SPH_ATTR_NONE;
return dArgs[0];
case XRANK_MAX_WINDOW_HITS:
if ( !CheckArgtypes ( dArgs, "MAX_WINDOW_HITS", "c:i", bAllConst, sError ) )
return SPH_ATTR_NONE;
return SPH_ATTR_INTEGER;
case XRANK_BM25A:
if ( !CheckArgtypes ( dArgs, "BM25A", "c:ss", bAllConst, sError ) )
return SPH_ATTR_NONE;
return SPH_ATTR_FLOAT;
case XRANK_BM25F:
if ( !CheckArgtypes ( dArgs, "BM25F", "c:ss", bAllConst, sError ) )
if ( !CheckArgtypes ( dArgs, "BM25F", "c:ssh", bAllConst, sError ) )
return SPH_ATTR_NONE;
return SPH_ATTR_FLOAT;
default:
sError.SetSprintf ( "internal error: unknown hook function (id=%d)", iID );
}
return SPH_ATTR_NONE;
}
void CheckEnter ( int iID ) final
{
if ( !m_sCheckError )
switch ( iID )
{
case XRANK_LCS:
case XRANK_USER_WEIGHT:
case XRANK_HIT_COUNT:
case XRANK_WORD_COUNT:
case XRANK_TF_IDF:
case XRANK_MIN_IDF:
case XRANK_MAX_IDF:
case XRANK_SUM_IDF:
case XRANK_MIN_HIT_POS:
case XRANK_MIN_BEST_SPAN_POS:
case XRANK_EXACT_HIT:
case XRANK_MAX_WINDOW_HITS:
case XRANK_LCCS:
case XRANK_WLCCS:
if ( !m_bCheckInFieldAggr )
m_sCheckError = "field factors must only occur within field aggregation functions in ranking expression";
break;
case XRANK_SUM:
case XRANK_TOP:
if ( m_bCheckInFieldAggr )
m_sCheckError = "field aggregates can not be nested in ranking expression";
else
m_bCheckInFieldAggr = true;
break;
default:
assert ( iID>=0 );
return;
}
}
void CheckExit ( int iID ) final
{
if ( !m_sCheckError && ( iID==XRANK_SUM || iID==XRANK_TOP ) )
{
assert ( m_bCheckInFieldAggr );
m_bCheckInFieldAggr = false;
}
}
};
/// initialize ranker state
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
bool RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::Init ( int iFields, const int * pWeights, ExtRanker_T<true> * pRanker, CSphString & sError, DWORD uFactorFlags )
{
m_iFields = iFields;
m_pWeights = pWeights;
m_uDocBM25 = 0;
m_tMatchedFields.Init ( iFields );
m_tExactHit.Init ( iFields );
m_tExactOrder.Init ( iFields );
m_iCurrentField = 0;
m_iMaxQpos = pRanker->m_iMaxQpos; // already copied in SetQwords, but anyway
m_iWindowSize = 1;
m_iHaveMinWindow = 0;
m_dMinWindowHits.Reserve ( Max ( m_iMaxQpos, 32 ) );
m_iQueryPosLCCS = 0;
m_iHitPosLCCS = 0;
m_iLenLCCS = 0;
m_fWeightLCCS = 0.0f;
m_dAtcTerms.Reset ( m_iMaxQpos + 1 );
m_dAtcProcessedTerms.Init ( m_iMaxQpos + 1 );
m_bAtcHeadProcessed = false;
m_uLCS.Reset ( iFields );
m_uHitCount.Reset ( iFields );
m_uWordCount.Reset ( iFields );
m_dTFIDF.Reset ( iFields );
m_dMinIDF.Reset ( iFields );
m_dMaxIDF.Reset ( iFields );
m_dSumIDF.Reset ( iFields );
m_iMinHitPos.Reset ( iFields );
m_iMinBestSpanPos.Reset ( iFields );
m_iMaxWindowHits.Reset ( iFields );
m_iMinGaps.Reset ( iFields );
m_dLCCS.Reset ( iFields );
m_dLCCS.Fill ( 0 );
m_dWLCCS.Reset ( iFields );
m_dWLCCS.Fill ( 0 );
m_dAtc.Reset ( iFields );
ResetDocFactors();
// compute query level constants
// max_lcs, aka m_iMaxLCS (for matchany ranker emulation) gets computed here
// query_word_count, aka m_iQueryWordCount is set elsewhere (in SetQwordsIDF())
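// (max_lcs is the weight of a perfect match: all query words adjacent, in every weighted field)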
m_iMaxLCS = 0;
for ( int i=0; i<iFields; i++ )
m_iMaxLCS += pWeights[i] * pRanker->m_iQwords;
for ( int i=0; i<m_pSchema->GetAttrsCount(); i++ )
{
if ( m_pSchema->GetAttr(i).m_eAttrType!=SPH_ATTR_TOKENCOUNT )
continue;
m_tFieldLensLoc = m_pSchema->GetAttr(i).m_tLocator;
break;
}
m_fAvgDocLen = 0.0f;
m_pFieldLens = pRanker->GetIndex()->GetFieldLens();
if ( m_pFieldLens )
for ( int i=0; i<iFields; i++ )
m_fAvgDocLen += m_pFieldLens[i];
else
m_fAvgDocLen = 1.0f;
m_iTotalDocuments = pRanker->GetCtx()->m_iTotalDocs;
m_fAvgDocLen /= m_iTotalDocuments;
m_fParamK1 = 1.2f;
m_fParamB = 0.75f;
// not in SetQwords, because we only get iFields here
m_dFieldTF.Reset ( m_iFields*(m_iMaxQpos+1) );
m_dFieldTF.Fill ( 0 );
ExprRankerHook_T<NEED_PACKEDFACTORS, HANDLE_DUPES> tHook ( this );
// parse expression
bool bUsesWeight;
ExprParseArgs_t tExprArgs;
tExprArgs.m_pAttrType = &m_eExprType;
tExprArgs.m_pUsesWeight = &bUsesWeight;
tExprArgs.m_pHook = &tHook;
m_pExpr = sphExprParse ( m_sExpr, *m_pSchema, nullptr, sError, tExprArgs ); // FIXME!!! profile UDF here too
if ( !m_pExpr )
return false;
if ( m_eExprType!=SPH_ATTR_INTEGER && m_eExprType!=SPH_ATTR_FLOAT )
{
sError = "ranking expression must evaluate to integer or float";
return false;
}
if ( bUsesWeight )
{
sError = "ranking expression must not refer to WEIGHT()";
return false;
}
if ( tHook.m_sCheckError )
{
sError = tHook.m_sCheckError;
return false;
}
int iUniq = m_iMaxQpos;
if_const ( HANDLE_DUPES )
{
iUniq = 0;
ARRAY_FOREACH ( i, m_dTermDupes )
iUniq += ( IsTermSkipped(i) ? 0 : 1 );
}
m_iHaveMinWindow = iUniq;
// we either have an ATC factor in the expression or packedfactors() without no_atc=1
if ( m_bWantAtc || ( uFactorFlags & SPH_FACTOR_CALC_ATC ) )
m_bHaveAtc = ( iUniq>1 );
// all seems ok
return true;
}
/// process next hit, update factors
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
void RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::Update ( const ExtHit_t * pHlist )
{
const DWORD uField = HITMAN::GetField ( pHlist->m_uHitpos );
const int iPos = HITMAN::GetPos ( pHlist->m_uHitpos );
const DWORD uPosWithField = HITMAN::GetPosWithField ( pHlist->m_uHitpos );
if_const ( !HANDLE_DUPES )
{
// update LCS
int iDelta = uPosWithField - pHlist->m_uQuerypos;
if ( iDelta==m_iExpDelta )
{
if ( (int)uPosWithField>m_iLastHitPos )
m_uCurLCS = (BYTE)( m_uCurLCS + pHlist->m_uWeight );
if ( HITMAN::IsEnd ( pHlist->m_uHitpos ) && (int)pHlist->m_uQuerypos==m_iMaxQpos && iPos==m_iMaxQpos )
m_tExactHit.BitSet ( uField );
} else
{
if ( (int)uPosWithField>m_iLastHitPos )
m_uCurLCS = BYTE(pHlist->m_uWeight);
if ( iPos==1 && HITMAN::IsEnd ( pHlist->m_uHitpos ) && m_iMaxQpos==1 )
m_tExactHit.BitSet ( uField );
}
if ( m_uCurLCS>m_uLCS[uField] )
{
m_uLCS[uField] = m_uCurLCS;
// for the first hit in current field just use current position as min_best_span_pos
// else adjust for current lcs
if ( !m_iMinBestSpanPos [ uField ] )
m_iMinBestSpanPos [ uField ] = iPos;
else
m_iMinBestSpanPos [ uField ] = iPos - m_uCurLCS + 1;
}
m_iExpDelta = iDelta + pHlist->m_uSpanlen - 1;
m_iLastHitPos = uPosWithField;
} else
{
// reset accumulated data from previous field
if ( (DWORD)HITMAN::GetField ( m_uCurPos )!=uField )
{
m_uCurPos = 0;
m_uLcsTailPos = 0;
m_uCurQposMask = 0;
m_uCurLCS = 0;
}
if ( (DWORD)uPosWithField!=m_uCurPos )
{
// next new and shiny hitpos in line
// FIXME!? what do we do with longer spans? keep looking? reset?
if ( m_uCurLCS<2 )
{
m_uLcsTailPos = m_uCurPos;
m_uLcsTailQposMask = m_uCurQposMask;
m_uCurLCS = 1;
}
m_uCurQposMask = 0;
m_uCurPos = uPosWithField;
if ( m_uLCS [ uField ]<pHlist->m_uWeight )
{
m_uLCS [ uField ] = BYTE ( pHlist->m_uWeight );
m_iMinBestSpanPos [ uField ] = iPos;
m_uLastSpanStart = iPos;
}
}
// add that qpos to current qpos mask (for the current hitpos)
m_uCurQposMask |= ( 1UL << pHlist->m_uQuerypos );
// and check if that results in a better lcs match now
int iDelta = ( m_uCurPos-m_uLcsTailPos );
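// the shift aligns the two hitpos qpos masks: a set bit means some qpos at the
// current hitpos is exactly iDelta greater than a qpos that ended the lcs tail,
// i.e. the words are adjacent both in the query and in the document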
if ( iDelta && iDelta<32 && ( m_uCurQposMask >> iDelta ) & m_uLcsTailQposMask )
{
// cool, it matched!
m_uLcsTailQposMask = ( 1UL << pHlist->m_uQuerypos ); // our lcs span now ends with a specific qpos
m_uLcsTailPos = m_uCurPos; // and in a specific position
m_uCurLCS = BYTE ( m_uCurLCS+pHlist->m_uWeight ); // and it's longer
m_uCurQposMask = 0; // and we should avoid matching subsequent hits on the same hitpos
// update per-field vector
if ( m_uCurLCS>m_uLCS[uField] )
{
m_uLCS[uField] = m_uCurLCS;
m_iMinBestSpanPos[uField] = m_uLastSpanStart;
}
}
if ( iDelta==m_iExpDelta )
{
if ( HITMAN::IsEnd ( pHlist->m_uHitpos ) && (int)pHlist->m_uQuerypos==m_iMaxQpos && iPos==m_iMaxQpos )
m_tExactHit.BitSet ( uField );
} else
{
if ( iPos==1 && HITMAN::IsEnd ( pHlist->m_uHitpos ) && m_iMaxQpos==1 )
m_tExactHit.BitSet ( uField );
}
m_iExpDelta = iDelta + pHlist->m_uSpanlen - 1;
}
bool bLetsKeepup = false;
// update LCCS
if ( m_iQueryPosLCCS==pHlist->m_uQuerypos && m_iHitPosLCCS==iPos )
{
m_iLenLCCS++;
m_fWeightLCCS += m_dIDF [ pHlist->m_uQuerypos ];
} else
{
if_const ( HANDLE_DUPES && m_iHitPosLCCS && iPos<=m_iHitPosLCCS && m_tHasMultiQpos.BitGet ( pHlist->m_uQuerypos ) )
{
bLetsKeepup = true;
} else
{
m_iLenLCCS = 1;
m_fWeightLCCS = m_dIDF[pHlist->m_uQuerypos];
}
}
if ( !bLetsKeepup )
{
WORD iNextQPos = m_dNextQueryPos[pHlist->m_uQuerypos];
m_iQueryPosLCCS = iNextQPos;
m_iHitPosLCCS = iPos + pHlist->m_uSpanlen + iNextQPos - pHlist->m_uQuerypos - 1;
}
if ( m_dLCCS[uField]<=m_iLenLCCS ) // FIXME!!! check weight too or keep length and weight separate
{
m_dLCCS[uField] = m_iLenLCCS;
m_dWLCCS[uField] = m_fWeightLCCS;
}
// update ATC
if ( m_bHaveAtc )
{
if ( m_uAtcField!=uField || m_iAtcHitCount==XRANK_ATC_BUFFER_LEN )
{
UpdateATC ( m_uAtcField!=uField );
if ( m_uAtcField!=uField )
{
m_uAtcField = uField;
}
if ( m_iAtcHitCount==XRANK_ATC_BUFFER_LEN ) // advance ring buffer
{
m_iAtcHitStart = ( m_iAtcHitStart + XRANK_ATC_WINDOW_LEN ) % XRANK_ATC_BUFFER_LEN;
m_iAtcHitCount -= XRANK_ATC_WINDOW_LEN;
}
}
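// logical hit indices are mapped into the fixed-size ring with a modulo;
// e.g. with illustrative values BUFFER_LEN=128, start=96, count=40,
// the next hit below goes to slot (96+40)%128=8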
assert ( m_iAtcHitStart<XRANK_ATC_BUFFER_LEN && m_iAtcHitCount<XRANK_ATC_BUFFER_LEN );
int iRing = ( m_iAtcHitStart + m_iAtcHitCount ) % XRANK_ATC_BUFFER_LEN;
AtcHit_t & tAtcHit = m_dAtcHits [ iRing ];
tAtcHit.m_iHitpos = iPos;
tAtcHit.m_uQuerypos = pHlist->m_uQuerypos;
m_iAtcHitCount++;
}
// update other stuff
m_tMatchedFields.BitSet ( uField );
// keywords can be duplicated in the query, so we need this extra check
WORD uQpos = pHlist->m_uQuerypos;
bool bUniq = m_tKeywords.BitGet ( pHlist->m_uQuerypos );
if_const ( HANDLE_DUPES && bUniq )
{
uQpos = m_dTermDupes [ uQpos ];
bUniq = ( m_dTermsHit[uQpos]!=pHlist->m_uHitpos && m_dTermsHit[0]!=pHlist->m_uHitpos );
m_dTermsHit[uQpos] = pHlist->m_uHitpos;
m_dTermsHit[0] = pHlist->m_uHitpos;
}
if ( bUniq )
{
UpdateFreq ( uQpos, uField );
}
// handle hit with multiple terms
if ( pHlist->m_uSpanlen>1 )
{
WORD uQposSpanned = pHlist->m_uQuerypos+1;
DWORD uQposMask = ( pHlist->m_uQposMask>>uQposSpanned );
while ( uQposMask!=0 )
{
WORD uQposFixed = uQposSpanned;
if ( ( uQposMask & 1 )==1 )
{
bool bUniqSpanned = true;
if_const ( HANDLE_DUPES )
{
uQposFixed = m_dTermDupes[uQposFixed];
bUniqSpanned = ( m_dTermsHit[uQposFixed]!=pHlist->m_uHitpos );
m_dTermsHit[uQposFixed] = pHlist->m_uHitpos;
}
if ( bUniqSpanned )
UpdateFreq ( uQposFixed, uField );
}
uQposSpanned++;
uQposMask = ( uQposMask>>1 );
}
}
if ( !m_iMinHitPos[uField] )
m_iMinHitPos[uField] = iPos;
// update hit window, max_window_hits factor
if ( m_iWindowSize>1 )
{
if ( m_dWindow.GetLength() )
{
// sorted_remove_if ( _1 + winsize <= hitpos )
int i = 0;
while ( i<m_dWindow.GetLength() && ( m_dWindow[i] + m_iWindowSize )<=pHlist->m_uHitpos )
i++;
for ( int j=0; j<m_dWindow.GetLength()-i; j++ )
m_dWindow[j] = m_dWindow[j+i];
m_dWindow.Resize ( m_dWindow.GetLength()-i );
}
m_dWindow.Add ( pHlist->m_uHitpos );
m_iMaxWindowHits[uField] = Max ( m_iMaxWindowHits[uField], m_dWindow.GetLength() );
} else
m_iMaxWindowHits[uField] = 1;
// update exact_order factor
if ( (int)uField!=m_iLastField )
{
m_iLastQuerypos = 0;
m_iExactOrderWords = 0;
m_iLastField = (int)uField;
}
if ( pHlist->m_uQuerypos==m_iLastQuerypos+1 )
{
if ( ++m_iExactOrderWords==m_iQueryWordCount )
m_tExactOrder.BitSet ( uField );
m_iLastQuerypos++;
}
// update min_gaps factor
if ( bUniq && m_iHaveMinWindow>1 )
{
uQpos = pHlist->m_uQuerypos;
if_const ( HANDLE_DUPES )
uQpos = m_dTermDupes[uQpos];
switch ( m_iHaveMinWindow )
{
// 2 keywords, special path
case 2:
if ( m_dMinWindowHits.GetLength() && HITMAN::GetField ( m_dMinWindowHits[0].m_uHitpos )!=(int)uField )
{
m_iMinWindowWords = 0;
m_dMinWindowHits.Resize ( 0 );
}
if ( !m_dMinWindowHits.GetLength() )
{
m_dMinWindowHits.Add() = *pHlist; // {} => {A}
m_dMinWindowHits.Last().m_uQuerypos = uQpos;
break;
}
assert ( m_dMinWindowHits.GetLength()==1 );
if ( uQpos==m_dMinWindowHits[0].m_uQuerypos )
m_dMinWindowHits[0].m_uHitpos = pHlist->m_uHitpos;
else
{
UpdateGap ( uField, 2, HITMAN::GetPos ( pHlist->m_uHitpos ) - HITMAN::GetPos ( m_dMinWindowHits[0].m_uHitpos ) - 1 );
m_dMinWindowHits[0] = *pHlist;
m_dMinWindowHits[0].m_uQuerypos = uQpos;
}
break;
// 3 keywords, special path
case 3:
if ( m_dMinWindowHits.GetLength() && HITMAN::GetField ( m_dMinWindowHits.Last().m_uHitpos )!=(int)uField )
{
m_iMinWindowWords = 0;
m_dMinWindowHits.Resize ( 0 );
}
// how many unique words are already there in the current candidate?
switch ( m_dMinWindowHits.GetLength() )
{
case 0:
m_dMinWindowHits.Add() = *pHlist; // {} => {A}
m_dMinWindowHits.Last().m_uQuerypos = uQpos;
break;
case 1:
if ( m_dMinWindowHits[0].m_uQuerypos==uQpos )
m_dMinWindowHits[0] = *pHlist; // {A} + A2 => {A2}
else
{
UpdateGap ( uField, 2, HITMAN::GetPos ( pHlist->m_uHitpos ) - HITMAN::GetPos ( m_dMinWindowHits[0].m_uHitpos ) - 1 );
m_dMinWindowHits.Add() = *pHlist; // {A} + B => {A,B}
m_dMinWindowHits.Last().m_uQuerypos = uQpos;
}
break;
case 2:
if ( m_dMinWindowHits[0].m_uQuerypos==uQpos )
{
UpdateGap ( uField, 2, HITMAN::GetPos ( pHlist->m_uHitpos ) - HITMAN::GetPos ( m_dMinWindowHits[1].m_uHitpos ) - 1 );
m_dMinWindowHits[0] = m_dMinWindowHits[1]; // {A,B} + A2 => {B,A2}
m_dMinWindowHits[1] = *pHlist;
m_dMinWindowHits[1].m_uQuerypos = uQpos;
} else if ( m_dMinWindowHits[1].m_uQuerypos==uQpos )
{
m_dMinWindowHits[1] = *pHlist; // {A,B} + B2 => {A,B2}
m_dMinWindowHits[1].m_uQuerypos = uQpos;
} else
{
// new {A,B,C} window!
// handle, and then immediately reduce it to {B,C}
UpdateGap ( uField, 3, HITMAN::GetPos ( pHlist->m_uHitpos ) - HITMAN::GetPos ( m_dMinWindowHits[0].m_uHitpos ) - 2 );
m_dMinWindowHits[0] = m_dMinWindowHits[1];
m_dMinWindowHits[1] = *pHlist;
m_dMinWindowHits[1].m_uQuerypos = uQpos;
}
break;
default:
assert ( 0 && "min_gaps current window size not in 0..2 range; must not happen" );
}
break;
// slow generic update
default:
UpdateMinGaps ( pHlist );
break;
}
}
}
template < bool PF, bool HANDLE_DUPES >
void RankerState_Expr_fn<PF, HANDLE_DUPES>::UpdateFreq ( WORD uQpos, DWORD uField )
{
float fIDF = m_dIDF [ uQpos ];
DWORD uHitPosMask = 1u<<uQpos;
if ( !( m_uWordCount[uField] & uHitPosMask ) )
m_dSumIDF[uField] += fIDF;
if ( fIDF < m_dMinIDF[uField] )
m_dMinIDF[uField] = fIDF;
if ( fIDF > m_dMaxIDF[uField] )
m_dMaxIDF[uField] = fIDF;
m_uHitCount[uField]++;
m_uWordCount[uField] |= uHitPosMask;
m_uDocWordCount |= uHitPosMask;
m_dTFIDF[uField] += fIDF;
// note: no per-word dedup for the BM25A/BM25F counters below though
// (counting every occurrence automatically accounts for the qtf factor)
m_dTF [ uQpos ]++;
m_dFieldTF [ uField*(1+m_iMaxQpos) + uQpos ]++;
}
template < bool PF, bool HANDLE_DUPES >
void RankerState_Expr_fn<PF, HANDLE_DUPES>::UpdateMinGaps ( const ExtHit_t * pHlist )
{
// update the minimum MW, aka matching window, for min_gaps and ymw factors
// we keep a window with all the positions of all the matched words
// we keep it left-minimal at all times, so that leftmost keyword only occurs once
// thus, when a previously unseen keyword is added, the window is guaranteed to be minimal
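// e.g. hits A@1, B@5, A@9: {A@1} gives gaps=0; adding B@5 gives gaps=5-1-1=3;
// adding A@9 shrinks the window to {B@5,A@9} with gaps=9-5-2+1=3, so the minimum stays 3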
WORD uQpos = pHlist->m_uQuerypos;
if_const ( HANDLE_DUPES )
uQpos = m_dTermDupes[uQpos];
// handle field switch
const int iField = HITMAN::GetField ( pHlist->m_uHitpos );
if ( m_dMinWindowHits.GetLength() && HITMAN::GetField ( m_dMinWindowHits.Last().m_uHitpos )!=iField )
{
m_dMinWindowHits.Resize ( 0 );
m_dMinWindowCounts.Fill ( 0 );
m_iMinWindowWords = 0;
}
// assert we are left-minimal
assert ( m_dMinWindowHits.GetLength()==0 || m_dMinWindowCounts [ m_dMinWindowHits[0].m_uQuerypos ]==1 );
// another occurrence of the trailing word?
// just update hitpos, effectively dumping the current occurrence
if ( m_dMinWindowHits.GetLength() && m_dMinWindowHits.Last().m_uQuerypos==uQpos )
{
m_dMinWindowHits.Last().m_uHitpos = pHlist->m_uHitpos;
return;
}
// add that word
LeanHit_t & t = m_dMinWindowHits.Add();
t.m_uQuerypos = uQpos;
t.m_uHitpos = pHlist->m_uHitpos;
int iWord = uQpos;
m_dMinWindowCounts[iWord]++;
// new, previously unseen keyword? just update the window size
if ( m_dMinWindowCounts[iWord]==1 )
{
m_iMinGaps[iField] = HITMAN::GetPos ( pHlist->m_uHitpos ) - HITMAN::GetPos ( m_dMinWindowHits[0].m_uHitpos ) - m_iMinWindowWords;
m_iMinWindowWords++;
return;
}
// check if we can shrink the left boundary
if ( iWord!=m_dMinWindowHits[0].m_uQuerypos )
return;
// yes, we can!
// keep removing the leftmost keyword until it's unique (in the window) again
assert ( m_dMinWindowCounts [ m_dMinWindowHits[0].m_uQuerypos ]==2 );
int iShrink = 0;
while ( m_dMinWindowCounts [ m_dMinWindowHits [ iShrink ].m_uQuerypos ]!=1 )
{
m_dMinWindowCounts [ m_dMinWindowHits [ iShrink ].m_uQuerypos ]--;
iShrink++;
}
int iNewLen = m_dMinWindowHits.GetLength() - iShrink;
memmove ( m_dMinWindowHits.Begin(), &m_dMinWindowHits[iShrink], iNewLen*sizeof(LeanHit_t) );
m_dMinWindowHits.Resize ( iNewLen );
int iNewGaps = HITMAN::GetPos ( pHlist->m_uHitpos ) - HITMAN::GetPos ( m_dMinWindowHits[0].m_uHitpos ) - m_iMinWindowWords + 1;
m_iMinGaps[iField] = Min ( m_iMinGaps[iField], iNewGaps );
}
template<bool A1, bool A2>
int RankerState_Expr_fn<A1,A2>::GetMaxPackedLength()
{
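// worst-case budget: 8 fixed dwords (size, 4 doc-level factors, field count, max qpos,
// field_tf length), up to 15 dwords per field, up to 4 dwords per query word,
// the flat field_tf array, plus the exact_hit and exact_order bitsets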
return sizeof(DWORD)*( 8 + m_iFields*15 + m_iMaxQpos*4 + m_dFieldTF.GetLength() ) + m_tExactHit.GetSizeBytes() + m_tExactOrder.GetSizeBytes();
}
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
BYTE * RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::PackFactors()
{
auto * pPackStart = (DWORD *)m_tFactorPool.Alloc();
DWORD * pPack = pPackStart;
assert ( pPackStart );
// leave space for size
pPack++;
assert ( m_tMatchedFields.GetSizeBytes()==m_tExactHit.GetSizeBytes() && m_tExactHit.GetSizeBytes()==m_tExactOrder.GetSizeBytes() );
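// resulting blob layout, all dwords:
// [size][bm25][bm25a][field mask][doc word count][field count]
// [exact_hit bits][exact_order bits][per-field factors][max qpos][per-word factors][field_tf len + array]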
// document level factors
*pPack++ = m_uDocBM25;
*pPack++ = sphF2DW ( m_fDocBM25A );
*pPack++ = *m_tMatchedFields.Begin();
*pPack++ = m_uDocWordCount;
// field level factors
*pPack++ = (DWORD)m_iFields;
// since v.6 the size of these depends on the number of fields
for ( DWORD i=0; i<m_tExactHit.GetSizeBytes()/sizeof(DWORD); i++ )
*pPack++ = *( m_tExactHit.Begin() + i );
for ( DWORD i=0; i<m_tExactOrder.GetSizeBytes()/sizeof(DWORD); i++ )
*pPack++ = *( m_tExactOrder.Begin() + i );
for ( int i=0; i<m_iFields; i++ )
{
DWORD uHit = m_uHitCount[i];
*pPack++ = uHit;
if ( uHit )
{
*pPack++ = (DWORD)i;
*pPack++ = m_uLCS[i];
*pPack++ = m_uWordCount[i];
*pPack++ = sphF2DW ( m_dTFIDF[i] );
*pPack++ = sphF2DW ( m_dMinIDF[i] );
*pPack++ = sphF2DW ( m_dMaxIDF[i] );
*pPack++ = sphF2DW ( m_dSumIDF[i] );
*pPack++ = (DWORD)m_iMinHitPos[i];
*pPack++ = (DWORD)m_iMinBestSpanPos[i];
// had exact_hit here before v.4
*pPack++ = (DWORD)m_iMaxWindowHits[i];
*pPack++ = (DWORD)m_iMinGaps[i]; // added in v.3
*pPack++ = sphF2DW ( m_dAtc[i] ); // added in v.4
*pPack++ = m_dLCCS[i]; // added in v.5
*pPack++ = sphF2DW ( m_dWLCCS[i] ); // added in v.5
}
}
// word level factors
*pPack++ = (DWORD)m_iMaxQpos;
for ( int i=1; i<=m_iMaxQpos; i++ )
{
DWORD uKeywordMask = !IsTermSkipped(i); // !COMMIT !m_tExcluded.BitGet(i);
*pPack++ = uKeywordMask;
if ( uKeywordMask )
{
*pPack++ = (DWORD)i;
*pPack++ = (DWORD)m_dTF[i];
*pPack++ = *(DWORD*)&m_dIDF[i];
}
}
// m_dFieldTF = iWord + iField * ( 1 + iWordsCount )
// FIXME! pack these sparse factors ( however these should fit into fixed-size FactorPool block )
*pPack++ = m_dFieldTF.GetLength();
memcpy ( pPack, m_dFieldTF.Begin(), m_dFieldTF.GetLength()*sizeof(m_dFieldTF[0]) );
pPack += m_dFieldTF.GetLength();
*pPackStart = (DWORD)((pPack-pPackStart)*sizeof(DWORD));
assert ( (pPack-pPackStart)*sizeof(DWORD)<=(DWORD)m_tFactorPool.GetElementSize() );
return (BYTE*)pPackStart;
}
template <bool NEED_PACKEDFACTORS, bool HANDLE_DUPES>
bool RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::ExtraDataImpl ( ExtraData_e eType, void ** ppResult )
{
if_const ( !( eType==EXTRA_SET_BLOBPOOL || eType==EXTRA_SET_COLUMNAR ) && !NEED_PACKEDFACTORS )
return false;
switch ( eType )
{
case EXTRA_SET_BLOBPOOL:
m_pExpr->Command ( SPH_EXPR_SET_BLOB_POOL, *ppResult );
return true;
case EXTRA_SET_COLUMNAR:
m_pExpr->Command ( SPH_EXPR_SET_COLUMNAR, *ppResult );
return true;
case EXTRA_SET_POOL_CAPACITY:
m_iPoolMatchCapacity = *(int*)ppResult;
m_iPoolMatchCapacity += MAX_BLOCK_DOCS;
return true;
case EXTRA_SET_MATCHPUSHED:
m_tFactorPool.AddRef ( *(RowTagged_t*)ppResult );
return true;
case EXTRA_SET_MATCHPOPPED:
for ( const RowTagged_t & tRow : *(CSphTightVector<RowTagged_t> *) ppResult )
m_tFactorPool.Release ( tRow );
return true;
case EXTRA_GET_DATA_PACKEDFACTORS:
*ppResult = m_tFactorPool.GetHashPtr();
return true;
case EXTRA_GET_DATA_RANKER_STATE:
{
auto * pState = (SphExtraDataRankerState_t *)ppResult;
pState->m_iFields = m_iFields;
pState->m_pSchema = m_pSchema;
pState->m_pFieldLens = m_pFieldLens;
pState->m_iTotalDocuments = m_iTotalDocuments;
pState->m_tFieldLensLoc = m_tFieldLensLoc;
pState->m_iMaxQpos = m_iMaxQpos;
}
return true;
case EXTRA_SET_MATCHTAG:
m_iMatchTag = *(int*)ppResult;
return true;
default:
return false;
}
return true;
}
/// finish document processing, compute weight from factors
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
int RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::Finalize ( const CSphMatch & tMatch )
{
#ifndef NDEBUG
// sanity check
for ( int i=0; i<m_iFields; ++i )
{
assert ( m_iMinHitPos[i]<=m_iMinBestSpanPos[i] );
if ( m_uLCS[i]==1 )
assert ( m_iMinHitPos[i]==m_iMinBestSpanPos[i] );
}
#endif // NDEBUG
// finishing touches
FinalizeDocFactors ( tMatch );
UpdateATC ( true );
if_const ( NEED_PACKEDFACTORS )
{
// pack factors
if ( !m_tFactorPool.IsInitialized() )
m_tFactorPool.Prealloc ( GetMaxPackedLength(), m_iPoolMatchCapacity );
m_tFactorPool.AddToHash ( RowTagged_t ( tMatch.m_tRowID, m_iMatchTag ), PackFactors() );
}
// compute expression
int iRes = ( m_eExprType==SPH_ATTR_INTEGER )
? m_pExpr->IntEval ( tMatch )
: (int)m_pExpr->Eval ( tMatch );
if_const ( HANDLE_DUPES )
{
m_uCurPos = 0;
m_uLcsTailPos = 0;
m_uLcsTailQposMask = 0;
m_uCurQposMask = 0;
}
// cleanup
ResetDocFactors();
m_dLCCS.Fill ( 0 );
m_dWLCCS.Fill ( 0 );
m_iQueryPosLCCS = 0;
m_iHitPosLCCS = 0;
m_iLenLCCS = 0;
m_fWeightLCCS = 0.0f;
// done
return iRes;
}
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
bool RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::IsTermSkipped ( int iTerm )
{
assert ( iTerm>=0 && iTerm<m_iMaxQpos+1 );
if_const ( HANDLE_DUPES )
return !m_tKeywords.BitGet ( iTerm ) || m_dTermDupes[iTerm]!=iTerm;
else
return !m_tKeywords.BitGet ( iTerm );
}
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
float RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::TermTC ( int iTerm, bool bLeft )
{
// border case short-cut
if ( ( bLeft && iTerm==m_iAtcHitStart ) || ( !bLeft && iTerm==m_iAtcHitStart+m_iAtcHitCount-1 ) )
return 0.0f;
int iRing = iTerm % XRANK_ATC_BUFFER_LEN;
int iHitpos = m_dAtcHits[iRing].m_iHitpos;
WORD uQuerypos = m_dAtcHits[iRing].m_uQuerypos;
m_dAtcProcessedTerms.Clear();
float fTC = 0.0f;
// loop bounds for down / up climbing
int iStart, iEnd, iStep;
if ( bLeft )
{
iStart = iTerm - 1;
iEnd = Max ( iStart - XRANK_ATC_WINDOW_LEN, m_iAtcHitStart-1 );
iStep = -1;
} else
{
iStart = iTerm + 1;
iEnd = Min ( iStart + XRANK_ATC_WINDOW_LEN, m_iAtcHitStart + m_iAtcHitCount );
iStep = 1;
}
int iFound = 0;
for ( int i=iStart; i!=iEnd && iFound!=m_iMaxQpos; i+=iStep )
{
iRing = i % XRANK_ATC_BUFFER_LEN;
const AtcHit_t & tCur = m_dAtcHits[iRing];
bool bGotDup = ( uQuerypos==tCur.m_uQuerypos );
if ( m_dAtcProcessedTerms.BitGet ( tCur.m_uQuerypos ) || iHitpos==tCur.m_iHitpos )
continue;
auto fWeightedDist = (float)pow ( float ( abs ( iHitpos - tCur.m_iHitpos ) ), XRANK_ATC_EXP );
float fTermTC = ( m_dIDF[tCur.m_uQuerypos] / fWeightedDist );
if ( bGotDup )
fTermTC *= XRANK_ATC_DUP_DIV;
fTC += fTermTC;
m_dAtcProcessedTerms.BitSet ( tCur.m_uQuerypos );
iFound++;
}
return fTC;
}
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
void RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>::UpdateATC ( bool bFlushField )
{
if ( !m_iAtcHitCount )
return;
int iWindowStart = m_iAtcHitStart + XRANK_ATC_WINDOW_LEN;
int iWindowEnd = Min ( iWindowStart + XRANK_ATC_WINDOW_LEN, m_iAtcHitStart+m_iAtcHitCount );
// border cases (hits below the ATC window that were collected since the start of the buffer; hits at the end of the buffer that span less than the ATC window)
if ( !m_bAtcHeadProcessed )
iWindowStart = m_iAtcHitStart;
if ( bFlushField )
iWindowEnd = m_iAtcHitStart+m_iAtcHitCount;
assert ( iWindowStart<iWindowEnd && iWindowStart>=m_iAtcHitStart && iWindowEnd<=m_iAtcHitStart+m_iAtcHitCount );
// per term ATC
// sum over t' in query of ( idf(t') / left_deltapos(t,t')^z + idf(t') / right_deltapos(t,t')^z ) * ( t==t' ? 0.25 : 1 )
for ( int iWinPos=iWindowStart; iWinPos<iWindowEnd; iWinPos++ )
{
float fTC = TermTC ( iWinPos, true ) + TermTC ( iWinPos, false );
int iRing = iWinPos % XRANK_ATC_BUFFER_LEN;
m_dAtcTerms [ m_dAtcHits[iRing].m_uQuerypos ] += fTC;
}
m_bAtcHeadProcessed = true;
if ( bFlushField )
{
float fWeightedSum = 0.0f;
ARRAY_FOREACH ( i, m_dAtcTerms )
{
fWeightedSum += m_dAtcTerms[i] * m_dIDF[i];
m_dAtcTerms[i] = 0.0f;
}
m_dAtc[m_uAtcField] = (float)log ( 1.0f + fWeightedSum );
m_iAtcHitStart = 0;
m_iAtcHitCount = 0;
m_bAtcHeadProcessed = false;
}
}
/// expression ranker
template < bool NEED_PACKEDFACTORS, bool HANDLE_DUPES >
class ExtRanker_Expr_T : public ExtRanker_State_T< RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>, true >
{
using BASE = ExtRanker_State_T< RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>, true >;
public:
ExtRanker_Expr_T ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const char * sExpr, const CSphSchema & tSchema, const RankerSettings_t & tSettings )
: ExtRanker_State_T< RankerState_Expr_fn<NEED_PACKEDFACTORS, HANDLE_DUPES>, true > ( tXQ, tSetup, tSettings )
{
// tricky bit, we stash the pointer to expr here, but it will be parsed
// somewhat later during InitState() call, when IDFs etc are computed
this->m_tState.m_sExpr = sExpr;
this->m_tState.m_pSchema = &tSchema;
}
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) final
{
BASE::SetQwordsIDF ( hQwords );
this->m_tState.m_iMaxQpos = this->m_iMaxQpos;
this->m_tState.SetQwords ( hQwords );
}
int GetMatches () final
{
if_const ( NEED_PACKEDFACTORS )
this->m_tState.FlushMatches ();
return BASE::GetMatches();
}
void SetTermDupes ( const ExtQwordsHash_t & hQwords, int iMaxQpos ) final
{
this->m_tState.SetTermDupes ( hQwords, iMaxQpos, this->m_pRoot.get() );
}
};
//////////////////////////////////////////////////////////////////////////
// EXPRESSION FACTORS EXPORT RANKER
//////////////////////////////////////////////////////////////////////////
/// ranker state that computes BM25 as weight, but also all known factors for export purposes
template <bool HANDLE_DUPES>
struct RankerState_Export_fn : public RankerState_Expr_fn<false, HANDLE_DUPES>
{
using BASE = RankerState_Expr_fn<false, HANDLE_DUPES>;
public:
CSphOrderedHash < CSphString, RowID_t, IdentityHash_fn, 256 > m_hFactors;
public:
RankerState_Export_fn()
{
BASE::m_bWantAtc = true;
}
int Finalize ( const CSphMatch & tMatch )
{
// finalize factor computations
BASE::FinalizeDocFactors ( tMatch );
// build document level factors
// FIXME? should we build query level factors too? max_lcs, query_word_count, etc
const int MAX_STR_LEN = 1024;
CSphVector<char> dVal;
dVal.Resize ( MAX_STR_LEN );
snprintf ( dVal.Begin(), dVal.GetLength(), "bm25=%d, bm25a=%f, field_mask=%d, doc_word_count=%d",
BASE::m_uDocBM25, BASE::m_fDocBM25A, *BASE::m_tMatchedFields.Begin(), BASE::m_uDocWordCount );
char sTmp[MAX_STR_LEN];
// build field level factors
for ( int i=0; i<BASE::m_iFields; i++ )
{
if ( !BASE::m_uHitCount[i] )
continue;
snprintf ( sTmp, MAX_STR_LEN, ", field%d="
"(lcs=%d, hit_count=%d, word_count=%d, "
"tf_idf=%f, min_idf=%f, max_idf=%f, sum_idf=%f, "
"min_hit_pos=%d, min_best_span_pos=%d, exact_hit=%d, max_window_hits=%d, "
"min_gaps=%d, exact_order=%d, lccs=%d, wlccs=%f, atc=%f)",
i,
BASE::m_uLCS[i], BASE::m_uHitCount[i], BASE::m_uWordCount[i],
BASE::m_dTFIDF[i], BASE::m_dMinIDF[i], BASE::m_dMaxIDF[i], BASE::m_dSumIDF[i],
BASE::m_iMinHitPos[i], BASE::m_iMinBestSpanPos[i], BASE::m_tExactHit.BitGet ( i ), BASE::m_iMaxWindowHits[i],
BASE::m_iMinGaps[i], BASE::m_tExactOrder.BitGet(i), BASE::m_dLCCS[i], BASE::m_dWLCCS[i], BASE::m_dAtc[i] );
auto iValLen = (int) strlen ( dVal.Begin() );
auto iTotalLen = iValLen+(int)strlen(sTmp);
if ( dVal.GetLength() < iTotalLen+1 )
dVal.Resize ( iTotalLen+1 );
strcpy ( &(dVal[iValLen]), sTmp ); //NOLINT
}
// build word level factors
for ( int i=1; i<=BASE::m_iMaxQpos; i++ )
if ( !BASE::IsTermSkipped(i) )
{
snprintf ( sTmp, MAX_STR_LEN, ", word%d=(tf=%d, idf=%f)", i-1, BASE::m_dTF[i], BASE::m_dIDF[i] );
auto iValLen = (int)strlen ( dVal.Begin() );
auto iTotalLen = iValLen+(int)strlen(sTmp);
if ( dVal.GetLength() < iTotalLen+1 )
dVal.Resize ( iTotalLen+1 );
strcpy ( &(dVal[iValLen]), sTmp ); //NOLINT
}
// export factors
m_hFactors.Add ( dVal.Begin(), tMatch.m_tRowID );
// compute sorting expression now
int iRes = ( BASE::m_eExprType==SPH_ATTR_INTEGER )
? BASE::m_pExpr->IntEval ( tMatch )
: (int)BASE::m_pExpr->Eval ( tMatch );
// cleanup and return!
BASE::ResetDocFactors();
return iRes;
}
bool ExtraDataImpl ( ExtraData_e eType, void ** ppResult ) final
{
if ( eType==EXTRA_GET_DATA_RANKFACTORS )
*ppResult = &m_hFactors;
return true;
}
};
/// export ranker that emits BM25 as the weight, but computes and exports all the factors
/// useful for research purposes, e.g. exporting the data for machine learning
template < bool HANDLE_DUPES >
class ExtRanker_Export_T : public ExtRanker_State_T<RankerState_Export_fn<HANDLE_DUPES>, true>
{
using BASE = ExtRanker_State_T<RankerState_Export_fn<HANDLE_DUPES>, true>;
public:
ExtRanker_Export_T ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup, const char * sExpr, const CSphSchema & tSchema, const RankerSettings_t & tSettings )
: BASE ( tXQ, tSetup, tSettings )
{
BASE::m_tState.m_sExpr = sExpr;
BASE::m_tState.m_pSchema = &tSchema;
}
void SetQwordsIDF ( const ExtQwordsHash_t & hQwords ) final
{
BASE::SetQwordsIDF ( hQwords );
BASE::m_tState.m_iMaxQpos = BASE::m_iMaxQpos;
BASE::m_tState.SetQwords ( hQwords );
}
void SetTermDupes ( const ExtQwordsHash_t & hQwords, int iMaxQpos ) final
{
BASE::m_tState.SetTermDupes ( hQwords, iMaxQpos, BASE::m_pRoot.get() );
}
};
//////////////////////////////////////////////////////////////////////////
// RANKER FACTORY
//////////////////////////////////////////////////////////////////////////
struct ExtQwordOrderbyQueryPos_t
{
bool IsLess ( const ExtQword_t * pA, const ExtQword_t * pB ) const
{
return pA->m_iQueryPos<pB->m_iQueryPos;
}
};
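/// recursively check whether any keyword occurs more than once in the query tree;
/// dupes force the slower HANDLE_DUPES ranker specializations below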
static bool HasQwordDupes ( XQNode_t * pNode, SmallStringHash_T<int> & hQwords )
{
ARRAY_FOREACH ( i, pNode->m_dChildren )
if ( HasQwordDupes ( pNode->m_dChildren[i], hQwords ) )
return true;
ARRAY_FOREACH ( i, pNode->m_dWords )
if ( !hQwords.Add ( 1, pNode->m_dWords[i].m_sWord ) )
return true;
return false;
}
static bool HasQwordDupes ( XQNode_t * pNode )
{
SmallStringHash_T<int> hQwords;
return HasQwordDupes ( pNode, hQwords );
}
std::unique_ptr<ISphRanker> sphCreateRanker ( const XQQuery_t & tXQ, const CSphQuery & tQuery, CSphQueryResultMeta & tMeta, const ISphQwordSetup & tTermSetup, const CSphQueryContext & tCtx, const ISphSchema & tSorterSchema )
{
// shortcut
const CSphIndex * pIndex = tTermSetup.m_pIndex;
// fill payload mask
DWORD uPayloadMask = 0;
for ( int i=0; i < pIndex->GetMatchSchema().GetFieldsCount(); i++ )
uPayloadMask |= pIndex->GetMatchSchema().GetField(i).m_bPayload << i;
bool bGotDupes = HasQwordDupes ( tXQ.m_pRoot );
RankerSettings_t tRankerSettings;
tRankerSettings.m_bSkipQCache = tCtx.m_bSkipQCache;
// can we serve this from cache?
QcacheEntryRefPtr_t pCached;
if ( !tRankerSettings.m_bSkipQCache )
pCached = QcacheFind ( pIndex->GetIndexId(), tQuery, tSorterSchema );
if ( pCached )
return QcacheRanker ( pCached, tTermSetup );
// we need this for rankers that populate nodes with docs immediately after creation (e.g. payload nodes)
tRankerSettings.m_bRowidLimits = tQuery.m_dFilters.any_of ( []( auto & tFilter ){ return tFilter.m_sAttrName=="@rowid"; } );
if ( tRankerSettings.m_bRowidLimits )
GetRowIdFilter ( tQuery.m_dFilters, tCtx.m_iIndexTotalDocs, tRankerSettings.m_tBoundaries );
// setup eval-tree
std::unique_ptr<ExtRanker_c> pRanker;
switch ( tQuery.m_eRanker )
{
case SPH_RANK_PROXIMITY_BM25:
if ( uPayloadMask )
pRanker = std::make_unique<ExtRanker_State_T < RankerState_ProximityPayload_fn<true>, true >> ( tXQ, tTermSetup, tRankerSettings );
else if ( tXQ.m_bSingleWord )
pRanker = std::make_unique<ExtRanker_WeightSum_c<WITH_BM25>> ( tXQ, tTermSetup, tRankerSettings );
else if ( bGotDupes )
pRanker = std::make_unique<ExtRanker_State_T<RankerState_Proximity_fn<true, true>, true>> ( tXQ, tTermSetup, tRankerSettings );
else
pRanker = std::make_unique<ExtRanker_State_T<RankerState_Proximity_fn<true, false>, true>> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_BM25:
pRanker = std::make_unique < ExtRanker_WeightSum_c<WITH_BM25>> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_NONE:
pRanker = std::make_unique < ExtRanker_None_c> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_WORDCOUNT:
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Wordcount_fn, false >> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_PROXIMITY:
if ( tXQ.m_bSingleWord )
pRanker = std::make_unique < ExtRanker_WeightSum_c<>> ( tXQ, tTermSetup, tRankerSettings );
else if ( bGotDupes )
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Proximity_fn<false,true>, false >> ( tXQ, tTermSetup, tRankerSettings );
else
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Proximity_fn<false,false>, false >> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_MATCHANY:
pRanker = std::make_unique < ExtRanker_State_T < RankerState_MatchAny_fn, false>> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_FIELDMASK:
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Fieldmask_fn, false >> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_SPH04:
if ( bGotDupes )
pRanker = std::make_unique < ExtRanker_State_T < RankerState_ProximityBM25Exact_T<true>, true>> ( tXQ, tTermSetup, tRankerSettings );
else
pRanker = std::make_unique < ExtRanker_State_T < RankerState_ProximityBM25Exact_T<false>, true>> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_EXPR:
{
// we need that mask in case these factors are used:
// min_idf,max_idf,sum_idf,hit_count,word_count,doc_word_count,tf_idf,tf,field_tf
// however, the ranker expression gets parsed later, at the Init() stage
// FIXME!!! move QposMask initialization past Init
tTermSetup.m_bSetQposMask = true;
bool bNeedFactors = !!(tCtx.m_uPackedFactorFlags & SPH_FACTOR_ENABLE);
if ( bNeedFactors && bGotDupes )
pRanker = std::make_unique < ExtRanker_Expr_T <true, true>> ( tXQ, tTermSetup, tQuery.m_sRankerExpr.cstr(), pIndex->GetMatchSchema(), tRankerSettings );
else if ( bNeedFactors && !bGotDupes )
pRanker = std::make_unique < ExtRanker_Expr_T <true, false>> ( tXQ, tTermSetup, tQuery.m_sRankerExpr.cstr(), pIndex->GetMatchSchema(), tRankerSettings );
else if ( !bNeedFactors && bGotDupes )
pRanker = std::make_unique < ExtRanker_Expr_T <false, true>> ( tXQ, tTermSetup, tQuery.m_sRankerExpr.cstr(), pIndex->GetMatchSchema(), tRankerSettings );
else if ( !bNeedFactors && !bGotDupes )
pRanker = std::make_unique < ExtRanker_Expr_T <false, false>> ( tXQ, tTermSetup, tQuery.m_sRankerExpr.cstr(), pIndex->GetMatchSchema(), tRankerSettings );
}
break;
case SPH_RANK_EXPORT:
		// TODO: replace the Export ranker with the Expression ranker to remove duplicated code
tTermSetup.m_bSetQposMask = true;
if ( bGotDupes )
pRanker = std::make_unique <ExtRanker_Export_T<true>> ( tXQ, tTermSetup, tQuery.m_sRankerExpr.cstr(), pIndex->GetMatchSchema(), tRankerSettings );
else
pRanker = std::make_unique <ExtRanker_Export_T<false>> ( tXQ, tTermSetup, tQuery.m_sRankerExpr.cstr(), pIndex->GetMatchSchema(), tRankerSettings );
break;
default:
tMeta.m_sWarning.SetSprintf ( "unknown ranking mode %d; using default", (int) tQuery.m_eRanker );
if ( bGotDupes )
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Proximity_fn<true,true>, true >> ( tXQ, tTermSetup, tRankerSettings );
else
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Proximity_fn<true,false>, false >> ( tXQ, tTermSetup, tRankerSettings );
break;
case SPH_RANK_PLUGIN:
{
auto p = PluginGet<PluginRanker_c> ( PLUGIN_RANKER, tQuery.m_sUDRanker.cstr() );
			// the plugin may be missing, e.g. when the query targets a distributed index
if ( p )
{
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Plugin_fn, true >> ( tXQ, tTermSetup, tRankerSettings );
pRanker->ExtraData ( EXTRA_SET_RANKER_PLUGIN, (void**)&p );
pRanker->ExtraData ( EXTRA_SET_RANKER_PLUGIN_OPTS, (void**) tQuery.m_sUDRankerOpts.cstr() );
} else
{
				// fall back to the default ranker when the plugin is missing
tMeta.m_sWarning.SetSprintf ( "unknown ranker plugin '%s'; using default", tQuery.m_sUDRanker.cstr() );
if ( bGotDupes )
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Proximity_fn<true,true>, true >> ( tXQ, tTermSetup, tRankerSettings );
else
pRanker = std::make_unique < ExtRanker_State_T < RankerState_Proximity_fn<true,false>, true >> ( tXQ, tTermSetup, tRankerSettings );
}
}
break;
}
assert ( pRanker );
pRanker->m_uPayloadMask = uPayloadMask;
if ( tQuery.m_bGlobalIDF && !pIndex->HasGlobalIDF() )
tMeta.m_sWarning.SetSprintf ( "query sets global_idf, but global_idf is missing from the index" );
// setup IDFs
ExtQwordsHash_t hQwords;
int iMaxQpos = pRanker->GetQwords ( hQwords );
const int iQwords = hQwords.GetLength ();
int64_t iTotalDocuments = tCtx.m_iTotalDocs;
CSphVector<const ExtQword_t *> dWords;
dWords.Reserve ( iQwords );
for ( auto& hQword : hQwords )
{
ExtQword_t & tWord = hQword.second;
int64_t iTermDocs = tWord.m_iDocs;
// shared docs count
if ( tCtx.m_pLocalDocs )
{
int64_t * pDocs = (*tCtx.m_pLocalDocs)( tWord.m_sWord );
if ( pDocs )
iTermDocs = *pDocs;
}
// build IDF
float fIDF = 0.0f;
if ( tQuery.m_bGlobalIDF )
fIDF = pIndex->GetGlobalIDF ( tWord.m_sWord, iTermDocs, tQuery.m_bPlainIDF );
else if ( iTermDocs )
{
// (word_docs > total_docs) case *is* occasionally possible
// because of dupes, or delayed purging in RT, etc
// FIXME? we don't expect over 4G docs per just 1 local index
const int64_t iTotalClamped = Max ( iTotalDocuments, iTermDocs );
if ( !tQuery.m_bPlainIDF )
{
// bm25 variant, idf = log((N-n+1)/n), as per Robertson et al
//
// idf \in [-log(N), log(N)]
// weight \in [-NumWords*log(N), NumWords*log(N)]
// we want weight \in [0, 1] range
// we prescale idfs and get weight \in [-0.5, 0.5] range
// then add 0.5 as our final step
//
// for the record, idf = log((N-n+0.5)/(n+0.5)) in the original paper
// but our variant is a bit easier to compute, and has a better (symmetric) range
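				// e.g. (hypothetical numbers) with N=1000 docs and n=10 matching: idf = ln(991/10) / (2*ln(1001)) ~= 4.60/13.82 ~= 0.33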
float fLogTotal = logf ( float ( 1+iTotalClamped ) );
fIDF = logf ( float ( iTotalClamped-iTermDocs+1 ) / float ( iTermDocs ) )
/ ( 2*fLogTotal );
} else
{
// plain variant, idf=log(N/n), as per Sparck-Jones
//
// idf \in [0, log(N)]
// weight \in [0, NumWords*log(N)]
// we prescale idfs and get weight in [0, 0.5] range
// then add 0.5 as our final step
float fLogTotal = logf ( float ( 1+iTotalClamped ) );
fIDF = logf ( float ( iTotalClamped ) / float ( iTermDocs ) )
/ ( 2*fLogTotal );
}
}
// optionally normalize IDFs so that sum(TF*IDF) fits into [0, 1]
if ( tQuery.m_bNormalizedTFIDF )
fIDF /= iQwords;
tWord.m_fIDF = fIDF * tWord.m_fBoost;
dWords.Add ( &tWord );
}
dWords.Sort ( ExtQwordOrderbyQueryPos_t() );
ARRAY_FOREACH ( i, dWords )
{
const ExtQword_t * pWord = dWords[i];
if ( !pWord->m_bExpanded )
tMeta.AddStat ( pWord->m_sDictWord, pWord->m_iDocs, pWord->m_iHits );
}
pRanker->m_iMaxQpos = iMaxQpos;
pRanker->SetQwordsIDF ( hQwords );
if ( bGotDupes )
pRanker->SetTermDupes ( hQwords, iMaxQpos );
if ( !pRanker->InitState ( tCtx, tMeta.m_sError ) )
pRanker.reset();
return pRanker;
}
//////////////////////////////////////////////////////////////////////////
/// HIT MARKER
//////////////////////////////////////////////////////////////////////////
void CSphHitMarker::Mark ( CSphVector<SphHitMark_t> & dMarked )
{
if ( !m_pRoot )
return;
const ExtHit_t * pHits = nullptr;
const ExtDoc_t * pDocs = nullptr;
pDocs = m_pRoot->GetDocsChunk();
if ( !pDocs )
return;
pHits = m_pRoot->GetHits ( pDocs );
for ( ; pHits->m_tRowID!=INVALID_ROWID; pHits++ )
{
SphHitMark_t tMark;
tMark.m_uPosition = HITMAN::GetPosWithField ( pHits->m_uHitpos );
tMark.m_uSpan = pHits->m_uMatchlen;
dMarked.Add ( tMark );
}
}
CSphHitMarker::~CSphHitMarker ()
{
SafeDelete ( m_pRoot );
}
CSphHitMarker * CSphHitMarker::Create ( const XQNode_t * pRoot, const ISphQwordSetup & tSetup )
{
ExtNode_i * pNode = nullptr;
if ( pRoot )
pNode = ExtNode_i::Create ( pRoot, tSetup, false, nullptr );
if ( !pNode )
return nullptr;
CSphHitMarker * pMarker = new CSphHitMarker;
pMarker->m_pRoot = pNode;
pMarker->m_pRoot->SetCollectHits();
return pMarker;
}
CSphString sphXQNodeToStr ( const XQNode_t * pNode )
{
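	// must stay in sync with the operator enum order, SPH_QUERY_AND..SPH_QUERY_PARAGRAPH (see the range check below)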
static const char * szNodeNames[] =
{
"AND",
"OR",
"MAYBE",
"NOT",
"ANDNOT",
"BEFORE",
"PHRASE",
"PROXIMITY",
"QUORUM",
"NEAR",
"NOTNEAR",
"SENTENCE",
"PARAGRAPH"
};
if ( pNode->GetOp()>=SPH_QUERY_AND && pNode->GetOp()<=SPH_QUERY_PARAGRAPH )
return szNodeNames [ pNode->GetOp()-SPH_QUERY_AND ];
CSphString sTmp;
sTmp.SetSprintf ( "OPERATOR-%d", pNode->GetOp() );
return sTmp;
}
| 137,247 | C++ | .cpp | 4,012 | 31.144317 | 224 | 0.669039 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

16,895 | compressed_mysql_layer.cpp | manticoresoftware_manticoresearch/src/compressed_mysql_layer.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "compressed_mysql_layer.h"
// for UNALIGNED_RAM_ACCESS
#include "sphinxdefs.h"
DWORD MysqlCompressedSocket_c::ReadLSBSmallDword ( InputBuffer_c& tIn )
{
BYTE dNum[3];
tIn.GetBytes ( dNum, 3 );
return (DWORD)dNum[2] << 16 | (DWORD)dNum[1] << 8 | (DWORD)dNum[0];
}
void MysqlCompressedSocket_c::SendLSBSmallDword ( DWORD uValue )
{
#if UNALIGNED_RAM_ACCESS && USE_LITTLE_ENDIAN
m_tOut.SendBytes ( &uValue, 3 );
#else
BYTE dNum[3];
dNum[0] = uValue & 0x000000FFU;
dNum[1] = ( uValue >> 8 ) & 0x000000FFU;
dNum[2] = ( uValue >> 16 ) & 0x000000FFU;
m_tOut.SendBytes ( dNum, 3 );
#endif
}
MysqlCompressedSocket_c::MysqlCompressedSocket_c ( std::unique_ptr<AsyncNetBuffer_c> pFrontend )
: m_pFrontend ( std::move ( pFrontend ) )
, m_tIn ( *m_pFrontend )
, m_tOut ( *m_pFrontend )
{
}
void MysqlCompressedSocket_c::SetWTimeoutUS ( int64_t iTimeoutUS )
{
m_tOut.SetWTimeoutUS ( iTimeoutUS );
};
int64_t MysqlCompressedSocket_c::GetWTimeoutUS() const
{
return m_tOut.GetWTimeoutUS();
}
void MysqlCompressedSocket_c::SetTimeoutUS ( int64_t iTimeoutUS )
{
m_tIn.SetTimeoutUS ( iTimeoutUS );
};
int64_t MysqlCompressedSocket_c::GetTimeoutUS() const
{
return m_tIn.GetTimeoutUS();
}
int64_t MysqlCompressedSocket_c::GetTotalReceived() const
{
return m_tIn.GetTotalReceived();
}
int64_t MysqlCompressedSocket_c::GetTotalSent() const
{
return m_tOut.GetTotalSent();
}
| 1,770 | C++ | .cpp | 60 | 28 | 96 | 0.742049 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,896 | secondarylib.cpp | manticoresoftware_manticoresearch/src/secondarylib.cpp |
//
// Copyright (c) 2020-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxutils.h"
#include "libutils.h"
#include "fileutils.h"
#include "schema/columninfo.h"
#include "schema/schema.h"
#include "columnarmisc.h"
#include "secondarylib.h"
using CheckStorage_fn = void (*) ( const std::string & sFilename, uint32_t uNumRows, std::function<void (const char*)> & fnError, std::function<void (const char*)> & fnProgress );
using VersionStr_fn = const char * (*)();
using GetVersion_fn = int (*)();
using CreateSI_fn = SI::Index_i * (*) ( const char * sFile, std::string & sError );
using CreateBuilder_fn = SI::Builder_i * (*) ( const common::Schema_t & tSchema, size_t tMemoryLimit, const std::string & sFile, size_t tBufferSize, std::string & sError );
static void * g_pSecondaryLib = nullptr;
static VersionStr_fn g_fnVersionStr = nullptr;
static CreateSI_fn g_fnCreateSI = nullptr;
static CreateBuilder_fn g_fnCreateBuilder = nullptr;
/////////////////////////////////////////////////////////////////////
#if HAVE_DLOPEN
bool InitSecondary ( CSphString & sError )
{
assert ( !g_pSecondaryLib );
CSphString sLibfile = TryDifferentPaths ( LIB_MANTICORE_SECONDARY, GetSecondaryFullpath(), SI::LIB_VERSION );
if ( sLibfile.IsEmpty() )
return true;
if ( !IsSSE42Supported() )
{
sError.SetSprintf ( "MCL requires a CPU that supports SSE 4.2" );
return false;
}
ScopedHandle_c tHandle ( dlopen ( sLibfile.cstr(), RTLD_LAZY | RTLD_LOCAL ) );
if ( !tHandle.Get() )
{
const char * szDlError = dlerror();
sError.SetSprintf ( "dlopen() failed: %s", szDlError ? szDlError : "(null)" );
return true; // if dlopen fails, don't report an error
}
sphLogDebug ( "dlopen(%s)=%p", sLibfile.cstr(), tHandle.Get() );
GetVersion_fn fnGetVersion;
if ( !LoadFunc ( fnGetVersion, tHandle.Get(), "GetSecondaryLibVersion", sLibfile, sError ) )
return false;
int iLibVersion = fnGetVersion();
if ( iLibVersion!=SI::LIB_VERSION )
{
sError.SetSprintf ( "daemon requires secondary library v%d (trying to load v%d)", SI::LIB_VERSION, iLibVersion );
return false;
}
if ( !LoadFunc ( g_fnVersionStr, tHandle.Get(), "GetSecondaryLibVersionStr", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnCreateSI, tHandle.Get(), "CreateSecondaryIndex", sLibfile, sError ) ) return false;
if ( !LoadFunc ( g_fnCreateBuilder, tHandle.Get(), "CreateBuilder", sLibfile, sError ) ) return false;
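	// all symbols resolved; keep the handle alive for the daemon's lifetime (released in ShutdownSecondary)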
g_pSecondaryLib = tHandle.Leak();
return true;
}
void ShutdownSecondary()
{
if ( g_pSecondaryLib )
dlclose ( g_pSecondaryLib );
}
#else
bool InitSecondary ( CSphString & sError )
{
return false;
}
void ShutdownSecondary()
{
}
#endif
const char * GetSecondaryVersionStr()
{
if ( !IsSecondaryLibLoaded() )
return nullptr;
assert ( g_fnVersionStr );
return g_fnVersionStr();
}
bool IsSecondaryLibLoaded()
{
return !!g_pSecondaryLib;
}
SI::Index_i * CreateSecondaryIndex ( const char * sFile, CSphString & sError )
{
if ( !IsSecondaryLibLoaded() )
{
sError = "secondary index library not loaded";
return nullptr;
}
assert ( g_fnCreateSI );
std::string sTmpError;
SI::Index_i * pSIdx = g_fnCreateSI ( sFile, sTmpError );
if ( !pSIdx )
sError = sTmpError.c_str();
return pSIdx;
}
std::unique_ptr<SI::Builder_i> CreateSecondaryIndexBuilder ( const common::Schema_t & tSchema, int64_t iMemoryLimit, const CSphString & sFile, int iBufferSize, CSphString & sError )
{
if ( !IsSecondaryLibLoaded() )
{
sError = "secondary index library not loaded";
return nullptr;
}
assert ( g_fnCreateBuilder );
std::string sTmpError;
std::unique_ptr<SI::Builder_i> pBuilder { g_fnCreateBuilder ( tSchema, iMemoryLimit, sFile.cstr(), iBufferSize, sTmpError ) };
if ( !pBuilder )
sError = sTmpError.c_str();
return pBuilder;
}
| 4,116 | C++ | .cpp | 114 | 34.149123 | 181 | 0.708386 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,897 | tasksavestate.cpp | manticoresoftware_manticoresearch/src/tasksavestate.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "tasksavestate.h"
#include "searchdtask.h"
#include "searchdaemon.h"
#include "sphinxplugin.h"
#include "searchdsql.h"
/////////////////////////////////////////////////////////////////////////////
// User variables stuff: store, add, provide hook
/////////////////////////////////////////////////////////////////////////////
static RwLock_t g_tUservarsMutex;
static SmallStringHash_T<Uservar_t> g_hUservars GUARDED_BY ( g_tUservarsMutex );
static void UservarAdd ( const CSphString& sName, CSphVector<SphAttr_t>& dVal, bool bPersist = true )
{
ScWL_t wLock ( g_tUservarsMutex );
Uservar_t* pVar = g_hUservars ( sName );
if ( pVar )
{
// variable exists, release previous value
// actual destruction of the value (aka data) might happen later
// as the concurrent queries might still be using and holding that data
// from here, the old value becomes nameless, though
assert ( pVar->m_eType==USERVAR_INT_SET || pVar->m_eType==USERVAR_INT_SET_TMP );
assert ( pVar->m_pVal );
} else
{
// create a shiny new variable
g_hUservars.Add ( Uservar_t(), sName );
pVar = g_hUservars ( sName );
}
// swap in the new value
assert ( pVar );
pVar->m_eType = bPersist ? USERVAR_INT_SET : USERVAR_INT_SET_TMP;
pVar->m_pVal = new UservarIntSetValues_c; // previous will be auto-released here
pVar->m_pVal->SwapData ( dVal );
}
// create or update the variable
void SetLocalUserVar ( const CSphString& sName, CSphVector<SphAttr_t>& dSetValues )
{
UservarAdd ( sName, dSetValues );
SphinxqlStateFlush ();
}
// create or update a variable which is not to be saved to state (i.e. exists only during the current session)
void SetLocalTemporaryUserVar ( const CSphString & sName, VecTraits_T<DocID_t> dDocids )
{
CSphVector<SphAttr_t> dSetValues;
dSetValues.Append ( dDocids ); // warn! explicit convert from DocID_t to SphAttr_t (as both are int64_t)
dSetValues.Uniq();
UservarAdd ( sName, dSetValues, false );
}
static UservarIntSet_c UservarsHook ( const CSphString& sUservar )
{
ScRL_t rLock ( g_tUservarsMutex );
Uservar_t* pVar = g_hUservars ( sUservar );
if ( !pVar )
return UservarIntSet_c ();
assert ( pVar->m_eType==USERVAR_INT_SET || pVar->m_eType==USERVAR_INT_SET_TMP);
return pVar->m_pVal;
}
void ServeUserVars ()
{
SetUserVarsHook ( UservarsHook );
}
/////////////////////////////////////////////////////////////////////////////
// SphinxQL state (plugins, uservars) management
/////////////////////////////////////////////////////////////////////////////
static CSphString g_sSphinxqlState;
/// process a single line from sphinxql state/startup script
static bool SphinxqlStateLine ( CSphVector<char>& dLine, CSphString* sError )
{
assert ( sError );
if ( !dLine.GetLength ())
return true;
// parser expects CSphString buffer with gap bytes at the end
if ( dLine.Last ()==';' )
dLine.Pop ();
dLine.Add ( '\0' );
dLine.Add ( '\0' );
dLine.Add ( '\0' );
CSphVector <SqlStmt_t> dStmt;
bool bParsedOK = sphParseSqlQuery ( dLine, dStmt, *sError, SPH_COLLATION_DEFAULT );
if ( !bParsedOK )
return false;
bool bOk = true;
ARRAY_FOREACH ( i, dStmt )
{
SqlStmt_t& tStmt = dStmt[i];
if ( tStmt.m_eStmt==STMT_SET && tStmt.m_eSet==SET_GLOBAL_UVAR )
{
tStmt.m_dSetValues.Sort ();
UservarAdd ( tStmt.m_sSetName, tStmt.m_dSetValues );
} else if ( tStmt.m_eStmt==STMT_CREATE_FUNCTION )
{
bOk &= sphPluginCreate ( tStmt.m_sUdfLib.cstr (), PLUGIN_FUNCTION, tStmt.m_sUdfName.cstr (),
tStmt.m_eUdfType, *sError );
} else if ( tStmt.m_eStmt==STMT_CREATE_PLUGIN )
{
bOk &= sphPluginCreate ( tStmt.m_sUdfLib.cstr (), sphPluginGetType ( tStmt.m_sStringParam ),
tStmt.m_sUdfName.cstr (), SPH_ATTR_NONE, *sError );
} else
{
bOk = false;
sError->SetSprintf ( "unsupported statement (must be one of SET GLOBAL, CREATE FUNCTION, CREATE PLUGIN)" );
}
}
return bOk;
}
/// uservars table reader
static void SphinxqlStateRead ( const CSphString& sName )
{
if ( sName.IsEmpty ())
return;
CSphString sError;
CSphAutoreader tReader;
if ( !tReader.Open ( sName, sError ))
return;
const int iReadBlock = 32 * 1024;
const int iGapLen = 2;
CSphVector<char> dLine;
dLine.Reserve ( iReadBlock + iGapLen );
bool bEscaped = false;
int iLines = 0;
while ( true )
{
const BYTE* pData = NULL;
int iRead = tReader.GetBytesZerocopy ( &pData, iReadBlock );
		// all uservars have been read
if ( iRead<=0 )
break;
// read escaped line
dLine.Reserve ( dLine.GetLength () + iRead + iGapLen );
const BYTE* s = pData;
const BYTE* pEnd = pData + iRead;
while ( s<pEnd )
{
// goto next line for escaped string
if ( *s=='\\' || ( bEscaped && ( *s=='\n' || *s=='\r' )))
{
s++;
while ( s<pEnd && ( *s=='\n' || *s=='\r' ))
{
iLines += ( *s=='\n' );
s++;
}
bEscaped = ( s>=pEnd );
continue;
}
bEscaped = false;
if ( *s=='\n' || *s=='\r' )
{
if ( !SphinxqlStateLine ( dLine, &sError ))
sphWarning ( "sphinxql_state: parse error at line %d: %s", 1 + iLines, sError.cstr ());
dLine.Resize ( 0 );
s++;
while ( s<pEnd && ( *s=='\n' || *s=='\r' ))
{
iLines += ( *s=='\n' );
s++;
}
continue;
}
dLine.Add ( *s );
s++;
}
}
if ( !SphinxqlStateLine ( dLine, &sError ))
sphWarning ( "sphinxql_state: parse error at line %d: %s", 1 + iLines, sError.cstr ());
}
bool InitSphinxqlState ( CSphString dStateFilePath, CSphString& sError )
{
g_sSphinxqlState = std::move ( dStateFilePath );
if ( !g_sSphinxqlState.IsEmpty ())
{
SphinxqlStateRead ( g_sSphinxqlState );
CSphWriter tWriter;
CSphString sNewState;
sNewState.SetSprintf ( "%s.new", g_sSphinxqlState.cstr ());
// initial check that work can be done
bool bCanWrite = tWriter.OpenFile ( sNewState, sError );
tWriter.CloseFile ();
::unlink ( sNewState.cstr ());
if ( !bCanWrite )
{
g_sSphinxqlState = ""; // need to disable thread join on shutdown
return false;
}
// put here to prevent race with check above
SphinxqlStateFlush ();
}
return true;
}
void IterateUservars ( UservarFn&& fnSample )
{
CSphVector<NamedRefVectorPair_t> dUservars;
{
ScRL_t rLock ( g_tUservarsMutex );
dUservars.Reserve ( g_hUservars.GetLength () );
for ( const auto& tUservar : g_hUservars )
if ( !tUservar.second.m_pVal->IsEmpty () )
dUservars.Add ( tUservar );
}
dUservars.Sort ( ::bind ( &NamedRefVectorPair_t::first ) );
dUservars.for_each ( fnSample );
}
/// SphinxQL state writer
/// flushes changes of uservars, UDFs
namespace {
void SphinxqlStateThreadFunc ()
{
static Threads::Coro::Mutex_c tSerializer;
Threads::Coro::ScopedMutex_t tLock { tSerializer };
assert ( !g_sSphinxqlState.IsEmpty ());
CSphString sNewState;
sNewState.SetSprintf ( "%s.new", g_sSphinxqlState.cstr ());
char dBuf[512];
const int iMaxString = 80;
assert (( int ) sizeof ( dBuf )>iMaxString );
CSphString sError;
CSphWriter tWriter;
	// now it's save time: open (and truncate) the .new file
if ( !tWriter.OpenFile ( sNewState, sError ))
{
sphWarning ( "sphinxql_state flush failed: %s", sError.cstr ());
return;
}
/////////////
// save UDFs
/////////////
auto pDesc = PublishSystemInfo ( "SphinxQL state save" );
sphPluginSaveState ( tWriter ); // refactor!
/////////////////
// save uservars
/////////////////
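	// each uservar is dumped as a SET GLOBAL statement; long value lists are wrapped
	// with trailing backslashes, which SphinxqlStateRead() re-joins on load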
IterateUservars ( [&dBuf,&tWriter,iMaxString] ( const NamedRefVectorPair_t &dVar )
{
if ( dVar.second.m_eType==USERVAR_INT_SET_TMP )
return;
const CSphVector<SphAttr_t> & dVals = *dVar.second.m_pVal;
int iLen = snprintf ( dBuf, sizeof ( dBuf ), "SET GLOBAL %s = ( " INT64_FMT, dVar.first.cstr (), dVals[0] );
for ( int j = 1; j<dVals.GetLength (); j++ )
{
			iLen += snprintf ( dBuf + iLen, sizeof ( dBuf ) - iLen, ", " INT64_FMT, dVals[j] );
if ( iLen>=iMaxString && j<dVals.GetLength () - 1 )
{
				iLen += snprintf ( dBuf + iLen, sizeof ( dBuf ) - iLen, " \\\n" );
tWriter.PutBytes ( dBuf, iLen );
iLen = 0;
}
}
if ( iLen )
tWriter.PutBytes ( dBuf, iLen );
char sTail[] = " );\n";
tWriter.PutBytes ( sTail, sizeof ( sTail ) - 1 );
});
/////////////////////////////////
// writing done, flip the burger
/////////////////////////////////
tWriter.CloseFile ();
if ( sph::rename ( sNewState.cstr (), g_sSphinxqlState.cstr ())==0 )
::unlink ( sNewState.cstr ());
else
sphWarning ( "sphinxql_state flush: rename %s to %s failed: %s", sNewState.cstr (), g_sSphinxqlState.cstr (), strerrorm ( errno ));
}
} // namespace
void SphinxqlStateFlush ()
{
if ( g_sSphinxqlState.IsEmpty ())
return;
Threads::StartJob ( SphinxqlStateThreadFunc );
}
| 9,095 | C++ | .cpp | 280 | 29.803571 | 133 | 0.646401 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | true | false | false |

16,898 | grouper.cpp | manticoresoftware_manticoresearch/src/grouper.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "grouper.h"
#include "datetime.h"
#include "exprdatetime.h"
#include "sphinxjson.h"
#include "sphinxint.h"
#include "columnargrouper.h"
class GrouperTraits_c : public CSphGrouper
{
public:
explicit GrouperTraits_c ( const CSphAttrLocator & tLoc ) : m_tLocator ( tLoc ) {}
void GetLocator ( CSphAttrLocator & tOut ) const override { tOut = m_tLocator; }
ESphAttr GetResultType() const override { return m_tLocator.m_iBitCount>8*(int)sizeof(DWORD) ? SPH_ATTR_BIGINT : SPH_ATTR_INTEGER; }
SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const override { return KeyFromValue ( tMatch.GetAttr ( m_tLocator ) ); }
void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const override { assert(0); }
protected:
CSphAttrLocator m_tLocator;
};
class GrouperAttr_c : public GrouperTraits_c
{
using GrouperTraits_c::GrouperTraits_c;
public:
CSphGrouper * Clone() const override { return new GrouperAttr_c(m_tLocator); }
SphGroupKey_t KeyFromValue ( SphAttr_t uValue ) const override { return uValue; }
};
template <bool UTC>
class GrouperDay_T : public GrouperTraits_c
{
using GrouperTraits_c::GrouperTraits_c;
public:
CSphGrouper * Clone() const override { return new GrouperDay_T(m_tLocator); }
SphGroupKey_t KeyFromValue ( SphAttr_t uValue ) const override { return CalcYearMonthDay ( ConvertGroupbyTime<UTC> ( (time_t)uValue ) ); }
};
template <bool UTC>
class GrouperWeek_T : public GrouperTraits_c
{
using GrouperTraits_c::GrouperTraits_c;
public:
CSphGrouper * Clone() const override { return new GrouperWeek_T(m_tLocator); }
SphGroupKey_t KeyFromValue ( SphAttr_t uValue ) const override { return CalcYearWeek ( ConvertGroupbyTime<UTC> ( (time_t)uValue ) ); }
};
template <bool UTC>
class GrouperMonth_T : public GrouperTraits_c
{
using GrouperTraits_c::GrouperTraits_c;
public:
CSphGrouper * Clone() const override { return new GrouperMonth_T(m_tLocator); }
SphGroupKey_t KeyFromValue ( SphAttr_t uValue ) const override { return CalcYearMonth ( ConvertGroupbyTime<UTC> ( (time_t)uValue ) ); }
};
template <bool UTC>
class GrouperYear_T : public GrouperTraits_c
{
using GrouperTraits_c::GrouperTraits_c;
public:
CSphGrouper * Clone() const override { return new GrouperYear_T(m_tLocator); }
SphGroupKey_t KeyFromValue ( SphAttr_t uValue ) const override { return ConvertGroupbyTime<UTC> ( (time_t)uValue ).year(); }
};
template <class PRED>
class CSphGrouperString final : public GrouperAttr_c, public PRED
{
public:
explicit CSphGrouperString ( const CSphAttrLocator & tLoc ) : GrouperAttr_c ( tLoc ) {}
ESphAttr GetResultType () const override { return SPH_ATTR_BIGINT; }
CSphGrouper * Clone() const final { return new CSphGrouperString ( m_tLocator ); }
SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const override
{
auto dBlobAttr = tMatch.FetchAttrData ( m_tLocator, GetBlobPool() );
if ( IsEmpty ( dBlobAttr ) )
return 0;
return PRED::Hash ( dBlobAttr.first,dBlobAttr.second );
}
};
/// lookup JSON key, group by looked up value (used in CSphKBufferJsonGroupSorter)
class CSphGrouperJsonField final : public CSphGrouper
{
public:
CSphGrouperJsonField ( const CSphAttrLocator & tLoc, ISphExpr * pExpr )
: m_tLocator ( tLoc )
, m_pExpr ( pExpr )
{
SafeAddRef ( pExpr );
}
void SetBlobPool ( const BYTE * pBlobPool ) final
{
CSphGrouper::SetBlobPool ( pBlobPool );
if ( m_pExpr )
m_pExpr->Command ( SPH_EXPR_SET_BLOB_POOL, (void*)pBlobPool );
}
SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final
{
if ( !m_pExpr )
return SphGroupKey_t();
return m_pExpr->Int64Eval ( tMatch );
}
void GetLocator ( CSphAttrLocator & tOut ) const final { tOut = m_tLocator; }
ESphAttr GetResultType() const final { return SPH_ATTR_BIGINT; }
void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const final { assert(0); }
SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); }
CSphGrouper * Clone() const final { return new CSphGrouperJsonField (*this); }
protected:
CSphGrouperJsonField ( const CSphGrouperJsonField & rhs )
: m_tLocator ( rhs.m_tLocator )
, m_pExpr ( SafeClone ( rhs.m_pExpr ) )
{}
CSphAttrLocator m_tLocator;
ISphExprRefPtr_c m_pExpr;
};
template <class PRED>
class GrouperStringExpr_T final : public CSphGrouper, public PRED
{
public:
explicit GrouperStringExpr_T ( ISphExpr * pExpr )
: m_pExpr ( pExpr )
{
assert(m_pExpr);
SafeAddRef(pExpr);
}
void GetLocator ( CSphAttrLocator & tOut ) const final {}
ESphAttr GetResultType () const final { return SPH_ATTR_BIGINT; }
SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); }
void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const final { assert(0); }
SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final
{
assert ( !m_pExpr->IsDataPtrAttr() );
const BYTE * pStr = nullptr;
int iLen = m_pExpr->StringEval ( tMatch, &pStr );
if ( !iLen )
return 0;
return PRED::Hash ( pStr, iLen );
}
CSphGrouper * Clone() const final { return new GrouperStringExpr_T(*this); }
void SetColumnar ( const columnar::Columnar_i * pColumnar ) final { m_pExpr->Command ( SPH_EXPR_SET_COLUMNAR, (void*)pColumnar ); }
protected:
GrouperStringExpr_T (const GrouperStringExpr_T& rhs)
: m_pExpr { SafeClone ( rhs.m_pExpr ) }
{}
ISphExprRefPtr_c m_pExpr;
};
template <typename MVA, typename ADDER>
static void AddGroupedMVA ( ADDER && fnAdd, const ByteBlob_t& dRawMVA )
{
VecTraits_T<MVA> dMvas {dRawMVA};
for ( auto & tValue : dMvas )
fnAdd ( sphUnalignedRead(tValue) );
}
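// fetch per-value group keys from an MVA attribute; PTR=true reads a packed ptr attribute
// stored in the match itself, PTR=false reads the attribute from the blob pool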
template<typename T, bool PTR>
static void FetchMVAKeys ( CSphVector<SphGroupKey_t> & dKeys, const CSphMatch & tMatch, const CSphAttrLocator & tLocator, const BYTE * pBlobPool )
{
dKeys.Resize(0);
int iLengthBytes = 0;
const BYTE * pMva = nullptr;
if constexpr ( PTR )
{
auto pPacked = (const BYTE *)tMatch.GetAttr(tLocator);
ByteBlob_t dUnpacked = sphUnpackPtrAttr(pPacked);
pMva = dUnpacked.first;
iLengthBytes = dUnpacked.second;
}
else
{
if ( !pBlobPool )
return;
pMva = sphGetBlobAttr ( tMatch, tLocator, pBlobPool, iLengthBytes );
}
int iNumValues = iLengthBytes / sizeof(T);
const T * pValues = (const T*)pMva;
dKeys.Resize(iNumValues);
for ( int i = 0; i<iNumValues; i++ )
dKeys[i] = (SphGroupKey_t)pValues[i];
}
template <class PRED, bool HAVE_COLUMNAR>
class CSphGrouperMulti final: public CSphGrouper, public PRED
{
using MYTYPE = CSphGrouperMulti<PRED,HAVE_COLUMNAR>;
public:
CSphGrouperMulti ( const CSphVector<CSphColumnInfo> & dAttrs, VecRefPtrs_t<ISphExpr *> dJsonKeys, ESphCollation eCollation );
SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const final;
void SetBlobPool ( const BYTE * pBlobPool ) final;
void SetColumnar ( const columnar::Columnar_i * pColumnar ) final;
CSphGrouper * Clone() const final;
void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const final;
SphGroupKey_t KeyFromValue ( SphAttr_t ) const final { assert(0); return SphGroupKey_t(); }
void GetLocator ( CSphAttrLocator & ) const final { assert(0); }
ESphAttr GetResultType() const final { return SPH_ATTR_BIGINT; }
bool IsMultiValue() const final;
private:
CSphVector<CSphColumnInfo> m_dAttrs;
VecRefPtrs_t<ISphExpr *> m_dJsonKeys;
ESphCollation m_eCollation = SPH_COLLATION_DEFAULT;
CSphVector<CSphRefcountedPtr<CSphGrouper>> m_dSingleKeyGroupers;
CSphVector<CSphRefcountedPtr<CSphGrouper>> m_dMultiKeyGroupers;
SphGroupKey_t FetchStringKey ( const CSphMatch & tMatch, const CSphAttrLocator & tLocator, SphGroupKey_t tPrevKey ) const;
void SpawnColumnarGroupers();
};
template <class PRED, bool HAVE_COLUMNAR>
CSphGrouperMulti<PRED,HAVE_COLUMNAR>::CSphGrouperMulti ( const CSphVector<CSphColumnInfo> & dAttrs, VecRefPtrs_t<ISphExpr *> dJsonKeys, ESphCollation eCollation )
: m_dAttrs ( dAttrs )
, m_dJsonKeys ( std::move(dJsonKeys) )
, m_eCollation ( eCollation )
{
assert ( dAttrs.GetLength()>1 );
assert ( dAttrs.GetLength()==m_dJsonKeys.GetLength() );
if constexpr ( HAVE_COLUMNAR )
SpawnColumnarGroupers();
}
template <class PRED, bool HAVE_COLUMNAR>
SphGroupKey_t CSphGrouperMulti<PRED,HAVE_COLUMNAR>::KeyFromMatch ( const CSphMatch & tMatch ) const
{
auto tKey = ( SphGroupKey_t ) SPH_FNV64_SEED;
for ( int i=0; i<m_dAttrs.GetLength(); i++ )
{
if constexpr ( HAVE_COLUMNAR )
{
if ( m_dSingleKeyGroupers[i] )
{
// use pre-spawned grouper
SphGroupKey_t tColumnarKey = m_dSingleKeyGroupers[i]->KeyFromMatch(tMatch);
tKey = ( SphGroupKey_t ) sphFNV64 ( tColumnarKey, tKey );
continue;
}
}
switch ( m_dAttrs[i].m_eAttrType )
{
case SPH_ATTR_STRING:
case SPH_ATTR_STRINGPTR:
tKey = FetchStringKey ( tMatch, m_dAttrs[i].m_tLocator, tKey );
break;
default:
{
SphAttr_t tAttr = tMatch.GetAttr ( m_dAttrs[i].m_tLocator );
tKey = ( SphGroupKey_t ) sphFNV64 ( tAttr, tKey );
}
break;
}
}
return tKey;
}
template <class PRED, bool HAVE_COLUMNAR>
void CSphGrouperMulti<PRED, HAVE_COLUMNAR>::SetBlobPool ( const BYTE * pBlobPool )
{
CSphGrouper::SetBlobPool ( pBlobPool );
for ( auto & i : m_dJsonKeys )
if ( i )
i->Command ( SPH_EXPR_SET_BLOB_POOL, (void*)pBlobPool );
	// the key might be a whole JSON attribute rather than a json.field stored in m_dJsonKeys
for ( auto & tAttr : m_dAttrs )
{
if ( tAttr.m_pExpr )
tAttr.m_pExpr->Command ( SPH_EXPR_SET_BLOB_POOL, (void*)pBlobPool );
}
}
template <class PRED, bool HAVE_COLUMNAR>
void CSphGrouperMulti<PRED,HAVE_COLUMNAR>::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
CSphGrouper::SetColumnar ( pColumnar );
for ( auto & i : m_dSingleKeyGroupers )
if ( i )
i->SetColumnar ( pColumnar );
for ( auto & i : m_dMultiKeyGroupers )
if ( i )
i->SetColumnar ( pColumnar );
}
template <class PRED, bool HAVE_COLUMNAR>
CSphGrouper * CSphGrouperMulti<PRED,HAVE_COLUMNAR>::Clone() const
{
VecRefPtrs_t<ISphExpr *> dJsonKeys;
m_dJsonKeys.for_each ( [&dJsonKeys] ( ISphExpr * p ) { dJsonKeys.Add ( SafeClone ( p ) ); } );
return new MYTYPE ( m_dAttrs, std::move(dJsonKeys), m_eCollation );
}
template <class PRED, bool HAVE_COLUMNAR>
void CSphGrouperMulti<PRED,HAVE_COLUMNAR>::MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const
{
dKeys.Resize(0);
CSphFixedVector<CSphVector<SphGroupKey_t>> dAllKeys { m_dAttrs.GetLength() };
for ( int i=0; i<m_dAttrs.GetLength(); i++ )
{
auto & dCurKeys = dAllKeys[i];
if constexpr ( HAVE_COLUMNAR )
{
if ( m_dMultiKeyGroupers[i] )
{
// use pre-spawned grouper
m_dMultiKeyGroupers[i]->MultipleKeysFromMatch ( tMatch, dCurKeys );
continue;
}
}
switch ( m_dAttrs[i].m_eAttrType )
{
case SPH_ATTR_UINT32SET:
FetchMVAKeys<DWORD,false> ( dCurKeys, tMatch, m_dAttrs[i].m_tLocator, GetBlobPool() );
break;
case SPH_ATTR_INT64SET:
FetchMVAKeys<int64_t,false> ( dCurKeys, tMatch, m_dAttrs[i].m_tLocator, GetBlobPool() );
break;
case SPH_ATTR_JSON:
PushJsonField ( m_dJsonKeys[i]->Int64Eval(tMatch), m_pBlobPool, [&dCurKeys]( SphAttr_t * pAttr, SphGroupKey_t uMatchGroupKey ){ dCurKeys.Add(uMatchGroupKey); return true; } );
break;
case SPH_ATTR_JSON_FIELD:
{
assert ( m_dAttrs[i].m_pExpr );
PushJsonField ( m_dAttrs[i].m_pExpr->Int64Eval ( tMatch ), m_pBlobPool, [&dCurKeys]( SphAttr_t * pAttr, SphGroupKey_t uMatchGroupKey )
{
dCurKeys.Add ( uMatchGroupKey );
return true;
});
}
break;
case SPH_ATTR_STRING:
case SPH_ATTR_STRINGPTR:
{
SphGroupKey_t tStringKey = FetchStringKey ( tMatch, m_dAttrs[i].m_tLocator, SPH_FNV64_SEED );
if ( tStringKey!=(SphGroupKey_t)SPH_FNV64_SEED )
dAllKeys[i].Add ( tStringKey );
}
break;
default:
dAllKeys[i].Add ( tMatch.GetAttr ( m_dAttrs[i].m_tLocator ) );
break;
}
}
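	// enumerate the cartesian product of the per-attribute key sets,
	// folding one key from each attribute into a single FNV64 group key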
CSphFixedVector<int> dIndexes { m_dAttrs.GetLength() };
dIndexes.ZeroVec();
do
{
auto tKey = ( SphGroupKey_t ) SPH_FNV64_SEED;
ARRAY_FOREACH ( i, dAllKeys )
if ( dAllKeys[i].GetLength() )
tKey = (SphGroupKey_t)sphFNV64 ( dAllKeys[i][dIndexes[i]], tKey );
dKeys.Add(tKey);
}
while ( NextSet ( dIndexes, dAllKeys ) );
}
template <class PRED, bool HAVE_COLUMNAR>
bool CSphGrouperMulti<PRED,HAVE_COLUMNAR>::IsMultiValue() const
{
return m_dAttrs.any_of ( []( auto & tAttr ){ return tAttr.m_eAttrType==SPH_ATTR_JSON || tAttr.m_eAttrType==SPH_ATTR_JSON_FIELD || tAttr.m_eAttrType==SPH_ATTR_UINT32SET || tAttr.m_eAttrType==SPH_ATTR_INT64SET; } );
}
template <class PRED, bool HAVE_COLUMNAR>
SphGroupKey_t CSphGrouperMulti<PRED,HAVE_COLUMNAR>::FetchStringKey ( const CSphMatch & tMatch, const CSphAttrLocator & tLocator, SphGroupKey_t tPrevKey ) const
{
ByteBlob_t tData = tMatch.FetchAttrData ( tLocator, GetBlobPool() );
if ( !tData.first || !tData.second )
return tPrevKey;
return PRED::Hash ( tData.first, tData.second, tPrevKey );
}
template <class PRED, bool HAVE_COLUMNAR>
void CSphGrouperMulti<PRED,HAVE_COLUMNAR>::SpawnColumnarGroupers()
{
m_dSingleKeyGroupers.Resize ( m_dAttrs.GetLength() );
m_dMultiKeyGroupers.Resize ( m_dAttrs.GetLength() );
ARRAY_FOREACH ( i, m_dAttrs )
{
const auto & tAttr = m_dAttrs[i];
if ( !tAttr.IsColumnar() && !tAttr.IsColumnarExpr() )
continue;
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_STRING:
case SPH_ATTR_STRINGPTR:
m_dSingleKeyGroupers[i] = CreateGrouperColumnarString ( tAttr.m_sName, m_eCollation );
break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET:
case SPH_ATTR_INT64SET_PTR:
m_dMultiKeyGroupers[i] = CreateGrouperColumnarMVA ( tAttr.m_sName, tAttr.m_eAttrType );
break;
default:
m_dSingleKeyGroupers[i] = CreateGrouperColumnarInt ( tAttr.m_sName, tAttr.m_eAttrType );
break;
}
}
}
template<typename T, bool PTR>
class GrouperMVA_T : public CSphGrouper
{
public:
explicit GrouperMVA_T ( const CSphAttrLocator & tLocator ) : m_tLocator ( tLocator ) {}
SphGroupKey_t KeyFromValue ( SphAttr_t ) const override { assert(0); return SphGroupKey_t(); }
SphGroupKey_t KeyFromMatch ( const CSphMatch & tMatch ) const override { assert(0); return SphGroupKey_t(); }
void MultipleKeysFromMatch ( const CSphMatch & tMatch, CSphVector<SphGroupKey_t> & dKeys ) const override { FetchMVAKeys<T,PTR> ( dKeys, tMatch, m_tLocator, GetBlobPool() ); }
void GetLocator ( CSphAttrLocator & tOut ) const override { tOut = m_tLocator; }
ESphAttr GetResultType () const override;
CSphGrouper * Clone() const override { return new GrouperMVA_T ( m_tLocator ); }
bool IsMultiValue() const override { return true; }
private:
CSphAttrLocator m_tLocator;
};
template<> ESphAttr GrouperMVA_T<DWORD,true>::GetResultType() const { return SPH_ATTR_INTEGER; }
template<> ESphAttr GrouperMVA_T<DWORD,false>::GetResultType() const { return SPH_ATTR_INTEGER; }
template<> ESphAttr GrouperMVA_T<int64_t,true>::GetResultType() const { return SPH_ATTR_BIGINT; }
template<> ESphAttr GrouperMVA_T<int64_t,false>::GetResultType() const { return SPH_ATTR_BIGINT; }
/////////////////////////////////////////////////////////////////////////////
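// factory helpers; UTC vs. localtime grouping is chosen once, at creation time, via GetGroupingInUTC()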
CSphGrouper * CreateGrouperDay ( const CSphAttrLocator & tLoc )
{
return GetGroupingInUTC() ? (CSphGrouper *)new GrouperDay_T<true>(tLoc) : (CSphGrouper *)new GrouperDay_T<false>(tLoc);
}
CSphGrouper * CreateGrouperWeek ( const CSphAttrLocator & tLoc )
{
return GetGroupingInUTC() ? (CSphGrouper *)new GrouperWeek_T<true>(tLoc) : (CSphGrouper *)new GrouperWeek_T<false>(tLoc);
}
CSphGrouper * CreateGrouperMonth ( const CSphAttrLocator & tLoc )
{
return GetGroupingInUTC() ? (CSphGrouper *)new GrouperMonth_T<true>(tLoc) : (CSphGrouper *)new GrouperMonth_T<false>(tLoc);
}
CSphGrouper * CreateGrouperYear ( const CSphAttrLocator & tLoc )
{
return GetGroupingInUTC() ? (CSphGrouper *)new GrouperYear_T<true>(tLoc) : (CSphGrouper *)new GrouperYear_T<false>(tLoc);
}
CSphGrouper * CreateGrouperJsonField ( const CSphAttrLocator & tLoc, ISphExpr * pExpr )
{
return new CSphGrouperJsonField ( tLoc, pExpr );
}
CSphGrouper * CreateGrouperMVA32 ( const CSphAttrLocator & tLoc )
{
if ( tLoc.m_bDynamic )
return new GrouperMVA_T<DWORD,true>(tLoc);
return new GrouperMVA_T<DWORD,false>(tLoc);
}
CSphGrouper * CreateGrouperMVA64 ( const CSphAttrLocator & tLoc )
{
if ( tLoc.m_bDynamic )
return new GrouperMVA_T<int64_t,true>(tLoc);
return new GrouperMVA_T<int64_t,false>(tLoc);
}
CSphGrouper * CreateGrouperAttr ( const CSphAttrLocator & tLoc )
{
return new GrouperAttr_c(tLoc);
}
CSphGrouper * CreateGrouperString ( const CSphAttrLocator & tLoc, ESphCollation eCollation )
{
switch ( eCollation )
{
case SPH_COLLATION_UTF8_GENERAL_CI: return new CSphGrouperString<Utf8CIHash_fn>(tLoc);
case SPH_COLLATION_LIBC_CI: return new CSphGrouperString<LibcCIHash_fn>(tLoc);
case SPH_COLLATION_LIBC_CS: return new CSphGrouperString<LibcCSHash_fn>(tLoc);
default: return new CSphGrouperString<BinaryHash_fn>(tLoc);
}
}
CSphGrouper * CreateGrouperStringExpr ( ISphExpr * pExpr, ESphCollation eCollation )
{
switch ( eCollation )
{
case SPH_COLLATION_UTF8_GENERAL_CI: return new GrouperStringExpr_T<Utf8CIHash_fn>(pExpr);
case SPH_COLLATION_LIBC_CI: return new GrouperStringExpr_T<LibcCIHash_fn>(pExpr);
case SPH_COLLATION_LIBC_CS: return new GrouperStringExpr_T<LibcCSHash_fn>(pExpr);
default: return new GrouperStringExpr_T<BinaryHash_fn>(pExpr);
}
}
CSphGrouper * CreateGrouperMulti ( const CSphVector<CSphColumnInfo> & dAttrs, VecRefPtrs_t<ISphExpr *> dJsonKeys, ESphCollation eCollation )
{
bool bHaveColumnar = dAttrs.any_of ( []( auto & tAttr ){ return tAttr.IsColumnar() || tAttr.IsColumnarExpr(); } );
bool bAllColumnar = dAttrs.all_of ( []( auto & tAttr ){ return tAttr.IsColumnar() || tAttr.IsColumnarExpr(); } );
if ( bAllColumnar )
return CreateGrouperColumnarMulti ( dAttrs, eCollation );
switch ( eCollation )
{
case SPH_COLLATION_UTF8_GENERAL_CI:
if ( bHaveColumnar )
return new CSphGrouperMulti<Utf8CIHash_fn,true> ( dAttrs, std::move(dJsonKeys), eCollation );
else
return new CSphGrouperMulti<Utf8CIHash_fn,false> ( dAttrs, std::move(dJsonKeys), eCollation );
case SPH_COLLATION_LIBC_CI:
if ( bHaveColumnar )
return new CSphGrouperMulti<LibcCIHash_fn,true> ( dAttrs, std::move(dJsonKeys), eCollation );
else
return new CSphGrouperMulti<LibcCIHash_fn,false> ( dAttrs, std::move(dJsonKeys), eCollation );
case SPH_COLLATION_LIBC_CS:
if ( bHaveColumnar )
return new CSphGrouperMulti<LibcCSHash_fn,true> ( dAttrs, std::move(dJsonKeys), eCollation );
else
return new CSphGrouperMulti<LibcCSHash_fn,false> ( dAttrs, std::move(dJsonKeys), eCollation );
default:
if ( bHaveColumnar )
return new CSphGrouperMulti<BinaryHash_fn,true> ( dAttrs, std::move(dJsonKeys), eCollation );
else
return new CSphGrouperMulti<BinaryHash_fn,false> ( dAttrs, std::move(dJsonKeys), eCollation );
}
}
/////////////////////////////////////////////////////////////////////////////
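// distinct-value fetchers: 'plain' fetchers return a single key per match,
// while 'multi' fetchers expand MVAs and JSON fields into multiple keys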
class DistinctFetcher_c : public DistinctFetcher_i
{
public:
explicit DistinctFetcher_c ( const CSphAttrLocator & tLocator ) : m_tLocator(tLocator) {}
void SetColumnar ( const columnar::Columnar_i * pColumnar ) override {}
void SetBlobPool ( const BYTE * pBlobPool ) override { m_pBlobPool = pBlobPool; }
void FixupLocators ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override { sphFixupLocator ( m_tLocator, pOldSchema, pNewSchema ); }
protected:
CSphAttrLocator m_tLocator;
const BYTE * m_pBlobPool = nullptr;
};
class DistinctFetcherPlain_c : public DistinctFetcher_c
{
using DistinctFetcher_c::DistinctFetcher_c;
public:
void GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const override { assert ( 0 && " Requesting multiple keys from plain distinct fetcher" ); }
bool IsMultiValue() const override { return false; }
};
class DistinctFetcherMulti_c : public DistinctFetcher_c
{
using DistinctFetcher_c::DistinctFetcher_c;
public:
SphAttr_t GetKey ( const CSphMatch & tMatch ) const override { assert ( 0 && " Requesting single keys from multi distinct fetcher" ); return 0; }
bool IsMultiValue() const override { return true; }
};
class DistinctFetcherInt_c : public DistinctFetcherPlain_c
{
using DistinctFetcherPlain_c::DistinctFetcherPlain_c;
public:
SphAttr_t GetKey ( const CSphMatch & tMatch ) const override { return tMatch.GetAttr(m_tLocator); }
DistinctFetcher_i * Clone() const override { return new DistinctFetcherInt_c(m_tLocator); }
};
class DistinctFetcherString_c : public DistinctFetcherPlain_c
{
using DistinctFetcherPlain_c::DistinctFetcherPlain_c;
public:
SphAttr_t GetKey ( const CSphMatch & tMatch ) const override;
DistinctFetcher_i * Clone() const override { return new DistinctFetcherString_c(m_tLocator); }
};
SphAttr_t DistinctFetcherString_c::GetKey ( const CSphMatch & tMatch ) const
{
auto dBlob = tMatch.FetchAttrData ( m_tLocator, m_pBlobPool );
return (SphAttr_t) sphFNV64 ( dBlob );
}
class DistinctFetcherJsonField_c : public DistinctFetcherMulti_c
{
using DistinctFetcherMulti_c::DistinctFetcherMulti_c;
public:
void GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const override;
DistinctFetcher_i * Clone() const override { return new DistinctFetcherJsonField_c(m_tLocator); }
};
void DistinctFetcherJsonField_c::GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const
{
dKeys.Resize(0);
PushJsonField ( tMatch.GetAttr(m_tLocator), m_pBlobPool, [&dKeys]( SphAttr_t * pAttr, SphGroupKey_t uGroupKey )
{
if ( uGroupKey )
dKeys.Add(uGroupKey);
return true;
} );
}
class DistinctFetcherJsonFieldPtr_c : public DistinctFetcherMulti_c
{
using DistinctFetcherMulti_c::DistinctFetcherMulti_c;
public:
void GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const override;
	DistinctFetcher_i * Clone() const override { return new DistinctFetcherJsonFieldPtr_c(m_tLocator); }
};
void DistinctFetcherJsonFieldPtr_c::GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const
{
dKeys.Resize(0);
auto pValue = (const BYTE *)tMatch.GetAttr(m_tLocator);
if ( !pValue )
return;
auto tBlob = sphUnpackPtrAttr(pValue);
pValue = tBlob.first;
ESphJsonType eJson = (ESphJsonType)*pValue++;
PushJsonFieldPtr ( pValue, eJson, [&dKeys]( SphGroupKey_t uGroupKey )
{
if ( uGroupKey )
dKeys.Add(uGroupKey);
return true;
}
);
}
template<typename T>
class DistinctFetcherMva_T : public DistinctFetcherMulti_c
{
using DistinctFetcherMulti_c::DistinctFetcherMulti_c;
public:
void GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const override;
DistinctFetcher_i * Clone() const override { return new DistinctFetcherMva_T(m_tLocator); }
};
template<typename T>
void DistinctFetcherMva_T<T>::GetKeys ( const CSphMatch & tMatch, CSphVector<SphAttr_t> & dKeys ) const
{
dKeys.Resize(0);
AddGroupedMVA<T> ( [&dKeys]( SphAttr_t tAttr ){ dKeys.Add(tAttr); }, tMatch.FetchAttrData ( m_tLocator, m_pBlobPool ) );
}
DistinctFetcher_i * CreateDistinctFetcher ( const CSphString & sName, const CSphAttrLocator & tLocator, ESphAttr eType )
{
switch ( eType )
{
case SPH_ATTR_STRING:
case SPH_ATTR_STRINGPTR: return new DistinctFetcherString_c(tLocator);
case SPH_ATTR_JSON_FIELD: return new DistinctFetcherJsonField_c(tLocator);
case SPH_ATTR_JSON_FIELD_PTR: return new DistinctFetcherJsonFieldPtr_c(tLocator);
case SPH_ATTR_UINT32SET:
case SPH_ATTR_UINT32SET_PTR: return new DistinctFetcherMva_T<DWORD>(tLocator);
case SPH_ATTR_INT64SET:
case SPH_ATTR_INT64SET_PTR: return new DistinctFetcherMva_T<int64_t>(tLocator);
default: return new DistinctFetcherInt_c(tLocator);
}
}
| 24,454 | C++ | .cpp | 604 | 38.134106 | 214 | 0.737704 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,899 | snippetpassage.cpp | manticoresoftware_manticoresearch/src/snippetpassage.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "snippetpassage.h"
#include "snippetindex.h"
#include "sphinxexcerpt.h"
static bool operator < ( const Passage_t & a, const Passage_t & b )
{
if ( a.m_iUniqQwords==b.m_iUniqQwords )
{
int iWeightA = a.GetWeight();
int iWeightB = b.GetWeight();
return iWeightA==iWeightB ? a.m_iCodes < b.m_iCodes : iWeightA < iWeightB;
} else
return a.m_iUniqQwords < b.m_iUniqQwords;
}
struct PassagePositionOrder_fn
{
inline bool IsLess ( const Passage_t & a, const Passage_t & b ) const
{
return a.m_iStart < b.m_iStart;
}
};
//////////////////////////////////////////////////////////////////////////
void Passage_t::Reset()
{
m_iStart = 0;
m_iTokens = 0;
m_iCodes = 0;
m_uQwords = 0;
m_iQwordsWeight = 0;
m_iQwordCount = 0;
m_iUniqQwords = 0;
m_iMaxLCS = 0;
m_iMinGap = 0;
m_iAroundBefore = 0;
m_iAroundAfter = 0;
m_iCodesBetweenKeywords = 0;
m_iWordsBetweenKeywords = 0;
m_iField = 0;
m_dBeforeTokens.Resize(0);
m_dAfterTokens.Resize(0);
}
void Passage_t::CopyData ( Passage_t & tPassage )
{
m_iStart = tPassage.m_iStart;
m_iTokens = tPassage.m_iTokens;
m_iCodes = tPassage.m_iCodes;
m_iWords = tPassage.m_iWords;
m_uQwords = tPassage.m_uQwords;
m_iQwordsWeight = tPassage.m_iQwordsWeight;
m_iQwordCount = tPassage.m_iQwordCount;
m_iUniqQwords = tPassage.m_iUniqQwords;
m_iMaxLCS = tPassage.m_iMaxLCS;
m_iMinGap = tPassage.m_iMinGap;
m_iStartLimit = tPassage.m_iStartLimit;
m_iEndLimit = tPassage.m_iEndLimit;
m_iAroundBefore = tPassage.m_iAroundBefore;
m_iAroundAfter = tPassage.m_iAroundAfter;
m_iCodesBetweenKeywords = tPassage.m_iCodesBetweenKeywords;
m_iWordsBetweenKeywords = tPassage.m_iWordsBetweenKeywords;
m_iField = tPassage.m_iField;
m_dBeforeTokens.SwapData ( tPassage.m_dBeforeTokens );
m_dAfterTokens.SwapData ( tPassage.m_dAfterTokens );
}
int Passage_t::GetWeight() const
{
return m_iQwordCount + m_iQwordsWeight*m_iMaxLCS + m_iMinGap;
}
//////////////////////////////////////////////////////////////////////////
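// greedy passage selection: repeatedly pick the best remaining passage, re-weight the rest
// against the keywords already shown, then trim/drop passages to honor the CP/word limits;
// the candidate list is taken by value, since selection mutates its working copy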
static CSphVector<Passage_t> SelectBestPassages ( const CSphVector<Passage_t> & dPassages, const SnippetLimits_t & tLimits, const SnippetQuerySettings_t & tSettings, const SnippetsDocIndex_c & tContainer, DWORD uFoundWords )
{
CSphVector<Passage_t> dShow;
if ( !dPassages.GetLength() )
return dShow;
// our limits
int iMaxPassages = tLimits.m_iLimitPassages
? Min ( dPassages.GetLength(), tLimits.m_iLimitPassages )
: dPassages.GetLength();
int iMaxWords = tLimits.m_iLimitWords ? tLimits.m_iLimitWords : INT_MAX;
int iMaxCp = tLimits.m_iLimit ? tLimits.m_iLimit : INT_MAX;
DWORD uWords = 0; // mask of words in dShow so far
int iTotalCodes = 0;
int iTotalWords = 0;
int iTotalKeywordCodes = 0;
int iTotalKeywordWords = 0;
CSphVector<int> dWeights ( dPassages.GetLength() );
ARRAY_FOREACH ( i, dPassages )
dWeights[i] = dPassages[i].m_iQwordsWeight;
// collect enough best passages to show all keywords and max out the limits
// don't care much if we're going over limits in this loop, it will be tightened below
bool bAll = false;
while ( dShow.GetLength() < iMaxPassages )
{
// get next best passage
int iBest = -1;
ARRAY_FOREACH ( i, dPassages )
{
Passage_t & tPass = dPassages[i];
if ( tPass.m_iCodes && ( iBest==-1 || dPassages[iBest] < tPass ) )
iBest = i;
}
if ( iBest<0 )
break;
Passage_t & tBest = dPassages[iBest];
		// a force_all_keywords passage may be very big, so allow showing at least one of them
if ( !tSettings.m_bForceAllWords || dShow.GetLength() )
if ( iTotalKeywordCodes+tBest.m_iCodesBetweenKeywords>iMaxCp || iTotalKeywordWords+tBest.m_iWordsBetweenKeywords>iMaxWords )
break;
bool bFits = iTotalCodes+tBest.m_iCodes<=iMaxCp && iTotalWords+tBest.m_iWords<=iMaxWords;
if ( uWords==uFoundWords && !bFits )
{
// there might be just enough space to partially display this passage
if ( iTotalCodes+tBest.m_iCodesBetweenKeywords<=iMaxCp && iTotalWords+tBest.m_iWordsBetweenKeywords<=iMaxWords )
{
iTotalWords += tBest.m_iWords;
iTotalCodes += tBest.m_iCodes;
dShow.Add ( tBest );
}
break;
}
// save it, despite limits or whatever, we'll tighten everything in the loop below
dShow.Add ( tBest );
uWords |= tBest.m_uQwords;
iTotalKeywordWords += tBest.m_iWordsBetweenKeywords;
iTotalKeywordCodes += tBest.m_iCodesBetweenKeywords;
iTotalWords += tBest.m_iWords;
iTotalCodes += tBest.m_iCodes;
tBest.m_iCodes = 0; // no longer needed here, abusing to mark displayed passages
// we just managed to show all words? do one final re-weighting run
if ( !bAll && uWords==uFoundWords )
{
bAll = true;
ARRAY_FOREACH ( i, dPassages )
dPassages[i].m_iQwordsWeight = dWeights[i];
}
// if we're already showing all words, re-weighting is not needed any more
if ( bAll )
continue;
// re-weight passages, adjust for new mask of shown words
// FIXME! re-weighting doesn't change m_iQwordCount (and qwords could possibly be duplicated) and LCS
ARRAY_FOREACH ( i, dPassages )
{
Passage_t & tPass = dPassages[i];
if ( !tPass.m_iCodes )
continue;
DWORD uMask = tBest.m_uQwords;
for ( int iWord=0; uMask; iWord++, uMask >>= 1 )
if ( ( uMask & 1 ) && ( tPass.m_uQwords & ( 1UL<<iWord ) ) )
{
tPass.m_iQwordsWeight -= tContainer.GetTermWeight(iWord);
tPass.m_iQwordCount--; // doesn't account for dupes
tPass.m_iUniqQwords--;
}
tPass.m_uQwords &= ~uWords;
}
}
// if all passages won't fit into the limit, try to trim them a bit
//
// this step is skipped when use_boundaries is enabled, because
// each passage must be a separate sentence (delimited by
// boundaries) and we don't want to split them
if ( ( iTotalCodes > iMaxCp || iTotalWords > iMaxWords ) && !tSettings.m_bUseBoundaries )
{
// trim passages
bool bFirst = true;
bool bDone = false;
int iCodes = iTotalCodes;
while ( !bDone )
{
// drop one token from each passage starting from the least relevant
for ( int i=dShow.GetLength(); i > 0; i-- )
{
Passage_t & tPassage = dShow[i-1];
if ( !tPassage.m_dBeforeTokens.GetLength() && !tPassage.m_dAfterTokens.GetLength() )
continue;
bool bDropFirst;
if ( tPassage.m_dBeforeTokens.GetLength() > tPassage.m_dAfterTokens.GetLength() )
bDropFirst = true;
else if ( tPassage.m_dBeforeTokens.GetLength() < tPassage.m_dAfterTokens.GetLength() )
bDropFirst = false;
else if ( !tPassage.m_dBeforeTokens.Last().m_iWordFlag && tPassage.m_dAfterTokens.Last().m_iWordFlag )
bDropFirst = true;
else if ( tPassage.m_dBeforeTokens.Last().m_iWordFlag && !tPassage.m_dAfterTokens.Last().m_iWordFlag )
bDropFirst = false;
else
bDropFirst = bFirst;
if ( bDropFirst )
{
// drop first
const StoredExcerptToken_t & tTok = tPassage.m_dBeforeTokens.Pop();
tPassage.m_iStart++;
tPassage.m_iTokens--;
tPassage.m_iCodes -= tTok.m_iLengthCP;
iTotalCodes -= tTok.m_iLengthCP;
iTotalWords -= tTok.m_iWordFlag;
} else
{
// drop last
const StoredExcerptToken_t & tTok = tPassage.m_dAfterTokens.Pop();
tPassage.m_iTokens--;
tPassage.m_iCodes -= tTok.m_iLengthCP;
iTotalCodes -= tTok.m_iLengthCP;
iTotalWords -= tTok.m_iWordFlag;
}
if ( iTotalCodes<=iMaxCp && iTotalWords<=iMaxWords )
{
bDone = true;
break;
}
}
if ( iTotalCodes==iCodes )
break; // couldn't reduce anything
iCodes = iTotalCodes;
bFirst = !bFirst;
}
}
	// if the passages still don't fit, start dropping the least significant ones; the limit is sacred
while ( ( iTotalCodes > iMaxCp || iTotalWords > iMaxWords ) && !tSettings.m_bForceAllWords )
{
iTotalCodes -= dShow.Last().m_iCodes;
iTotalWords -= dShow.Last().m_iWords;
dShow.RemoveFast ( dShow.GetLength()-1 );
}
// sort passages in the document order
dShow.Sort ( PassagePositionOrder_fn() );
return dShow;
}
//////////////////////////////////////////////////////////////////////////
PassageContext_t::PassageContext_t()
{
m_dPassages.Reserve(1024);
memset ( m_dQwordWeights, 0, sizeof(m_dQwordWeights) );
}
CSphVector<Passage_t> PassageContext_t::SelectBest ( const SnippetLimits_t & tLimits, const SnippetQuerySettings_t & tSettings, const SnippetsDocIndex_c & tContainer, DWORD uFoundWords ) const
{
CSphVector<Passage_t> dPassagesToShow;
if ( !m_dPassages.GetLength() )
return dPassagesToShow;
dPassagesToShow = SelectBestPassages ( m_dPassages, tLimits, tSettings, tContainer, uFoundWords );
for ( auto & i : dPassagesToShow )
{
i.m_dBeforeTokens.Reset();
i.m_dAfterTokens.Reset();
}
return dPassagesToShow;
}
| 9,125 | C++ | .cpp | 255 | 32.658824 | 224 | 0.697371 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,900 | net_action_accept.cpp | manticoresoftware_manticoresearch/src/net_action_accept.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "net_action_accept.h"
#include "netreceive_api.h"
#include "netreceive_ql.h"
#include "netreceive_http.h"
#include "coroutine.h"
#include "client_session.h"
#if _WIN32
// Win-specific headers and calls
#include <io.h>
using sph_sa_family_t=ADDRESS_FAMILY;
#else
// UNIX-specific headers and calls
#include <sys/wait.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
using sph_sa_family_t = sa_family_t;
#endif
int g_iThrottleAccept = 0;
extern volatile bool g_bMaintenance;
using ClientTaskSlist_t = boost::intrusive::slist<
ClientTaskInfo_t,
boost::intrusive::member_hook<ClientTaskInfo_t, ClientTaskHook_t, &ClientTaskInfo_t::m_tLink>,
boost::intrusive::constant_time_size<false>,
boost::intrusive::cache_last<true>>;
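// mutex-guarded wrapper over the intrusive task list; Enqueue/Remove are idempotent
// thanks to the is_linked() checks, and cache_last<true> keeps push_back O(1)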
class TaskSlist_c
{
mutable CSphMutex m_tGuard;
// sph::Spinlock_c m_tGuard {};
ClientTaskSlist_t m_tList GUARDED_BY ( m_tGuard );
public:
void Enqueue ( ClientTaskInfo_t* pTask ) EXCLUDES ( m_tGuard )
{
assert ( pTask );
if ( !pTask->m_tLink.is_linked() )
{
const ScopedMutex_t tLock { m_tGuard };
// sph::Spinlock_lock tLock { m_tGuard };
m_tList.push_back ( *pTask );
}
}
void Remove ( ClientTaskInfo_t* pTask ) EXCLUDES ( m_tGuard )
{
assert ( pTask );
if ( pTask->m_tLink.is_linked() )
{
const ScopedMutex_t tLock { m_tGuard };
// sph::Spinlock_lock tLock { m_tGuard };
m_tList.remove ( *pTask );
}
}
void IterateTasks ( TaskIteratorFn&& fnHandler )
{
const ScopedMutex_t tLock { m_tGuard };
for ( auto& tTask : m_tList )
fnHandler ( &tTask );
}
};
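// The intrusive list above stores the hook inside the task itself, so enqueueing never
// allocates, and a node can tell on its own whether it is linked. A minimal standalone
// sketch with boost::intrusive, mirroring the same options (illustrative type names,
// not the daemon's):
#include <boost/intrusive/slist.hpp>
#include <cassert>
struct DemoTask_t
{
	int m_iId = 0;
	boost::intrusive::slist_member_hook<> m_tHook;
};
using DemoList_t = boost::intrusive::slist<
	DemoTask_t,
	boost::intrusive::member_hook<DemoTask_t, boost::intrusive::slist_member_hook<>, &DemoTask_t::m_tHook>,
	boost::intrusive::constant_time_size<false>,
	boost::intrusive::cache_last<true>>;
static void IntrusiveDemo()
{
	DemoTask_t tA, tB;
	DemoList_t tList;
	tList.push_back ( tA );            // cache_last<true> enables O(1) push_back
	tList.push_back ( tB );
	assert ( tA.m_tHook.is_linked() ); // the node itself knows it is enqueued
	tList.pop_front();                 // tA is unlinked; no memory is freed or moved
	assert ( !tA.m_tHook.is_linked() );
}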
//hazard::Guard_c tGuard;
//auto pDescription = tGuard.Protect ( m_pHazardDescription );
void FormatClientAddress ( char szClientName[SPH_ADDRPORT_SIZE], const sockaddr_storage & saStorage )
{
// format client address
szClientName[0] = '\0';
if ( saStorage.ss_family==AF_INET )
{
struct sockaddr_in * pSa = ( (struct sockaddr_in *) &saStorage );
sphFormatIP ( szClientName, SPH_ADDRESS_SIZE, pSa->sin_addr.s_addr );
char * d = szClientName;
while ( *d )
d++;
snprintf ( d, 7, ":%d", (int) ntohs ( pSa->sin_port ) ); //NOLINT
} else if ( saStorage.ss_family==AF_UNIX )
{
strncpy ( szClientName, "(local)", 8 );
}
}
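// A minimal standalone sketch of the same "ip:port" formatting using plain POSIX calls
// (inet_ntop/ntohs); FormatIpPort is a hypothetical helper for illustration only:
#include <arpa/inet.h>
#include <netinet/in.h>
#include <cstdio>
static void FormatIpPort ( char * szOut, size_t uOutLen, const sockaddr_in & tSa )
{
	char szIp[INET_ADDRSTRLEN];
	inet_ntop ( AF_INET, &tSa.sin_addr, szIp, sizeof(szIp) ); // dotted-quad address
	snprintf ( szOut, uOutLen, "%s:%d", szIp, (int) ntohs ( tSa.sin_port ) );
}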
using NetConnection_t = std::pair<int, sph_sa_family_t>;
void SetTcpNodelay ( NetConnection_t tConn )
{
if ( tConn.second==AF_INET )
sphSetSockNodelay ( tConn.first );
}
void MultiServe ( std::unique_ptr<AsyncNetBuffer_c> pBuf, NetConnection_t tConn, Proto_e eProto )
{
auto& tSess = session::Info();
tSess.SetProto ( eProto ); // set the initially provided proto; may be switched to another one by the multi-protocol probe below, if possible
Proto_e eMultiProto;
switch ( eProto )
{
case Proto_e::MYSQL41: return SqlServe ( std::move ( pBuf ) );
case Proto_e::SPHINXSE: eMultiProto = Proto_e::SPHINXSE; break; // force sphinx SE
default:
eMultiProto = pBuf->Probe();
}
switch ( eMultiProto )
{
case Proto_e::SPHINXSE:
// the case of a legacy 'crazy squirrel' client, which talks using short packets.
if ( pBuf->HasBytes ()==4 )
SetTcpNodelay ( tConn );
// no break;
// [[clang::fallthrough]];
case Proto_e::SPHINX:
ApiServe ( std::move ( pBuf ));
break;
case Proto_e::HTTPS:
tSess.SetSsl ( true );
// [[clang::fallthrough]];
case Proto_e::HTTP:
SetTcpNodelay ( tConn );
HttpServe ( std::move ( pBuf ) );
break;
default:
sphLogDebugv ( "Unkown proto" );
break;
}
}
class NetActionAccept_c::Impl_c
{
Listener_t m_tListener;
CSphNetLoop* m_pNetLoop;
public:
explicit Impl_c ( const Listener_t & tListener, CSphNetLoop* pNetLoop ) : m_tListener ( tListener ), m_pNetLoop ( pNetLoop ) {}
void ProcessAccept ();
};
static DWORD NextConnectionID()
{
static std::atomic<DWORD> g_iConnectionID { 1 }; ///< global conn-id
return g_iConnectionID.fetch_add ( 1, std::memory_order_relaxed );
}
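// fetch_add is atomic even with memory_order_relaxed, so concurrent callers always get
// distinct IDs; relaxed ordering suffices because the ID is not used for synchronization.
// A minimal standalone sketch (illustrative only):
#include <atomic>
#include <thread>
#include <vector>
static void UniqueIdsDemo()
{
	std::atomic<unsigned> uNext { 1 };
	std::vector<std::thread> dThreads;
	for ( int i=0; i<4; ++i )
		dThreads.emplace_back ( [&uNext] {
			for ( int j=0; j<1000; ++j )
				(void) uNext.fetch_add ( 1, std::memory_order_relaxed ); // never returns a duplicate
		} );
	for ( auto & t : dThreads )
		t.join();
	// uNext is now exactly 1 + 4*1000 == 4001
}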
class ScopedClientInfo_c: public ScopedInfo_T<ClientTaskInfo_t>
{
public:
bool m_bVip;
static TaskSlist_c m_tTasks;
ClientSession_c tSession;
public:
explicit ScopedClientInfo_c ( ClientTaskInfo_t* pInfo )
: ScopedInfo_T<ClientTaskInfo_t> ( pInfo )
, m_bVip ( pInfo->GetVip() )
{
ClientTaskInfo_t::m_iClients.fetch_add ( 1, std::memory_order_relaxed );
if ( m_bVip )
ClientTaskInfo_t::m_iVips.fetch_add ( 1, std::memory_order_relaxed );
pInfo->SetClientSession ( &tSession );
m_tTasks.Enqueue ( pInfo );
}
~ScopedClientInfo_c()
{
Dequeue();
if ( m_bVip )
ClientTaskInfo_t::m_iVips.fetch_sub ( 1, std::memory_order_relaxed );
if ( m_pInfo->GetBuddy() )
ClientTaskInfo_t::m_iBuddy.fetch_sub ( 1, std::memory_order_relaxed );
ClientTaskInfo_t::m_iClients.fetch_sub ( 1, std::memory_order_relaxed );
}
void Dequeue()
{
m_tTasks.Remove ( (ClientTaskInfo_t*)m_pInfo );
m_pInfo->SetClientSession ( nullptr );
m_pInfo->SetTaskState ( TaskState_e::RETIRED );
}
};
TaskSlist_c ScopedClientInfo_c::m_tTasks;
void IterateTasks ( TaskIteratorFn&& fnHandler )
{
ScopedClientInfo_c::m_tTasks.IterateTasks ( std::move ( fnHandler ) );
}
void NetActionAccept_c::Impl_c::ProcessAccept ()
{
if ( sphInterrupted () )
return;
// handle all incoming requests at once but not too much
int iAccepted = 0;
auto _ = AtScopeExit([&iAccepted] { gStats().m_iConnections.fetch_add ( iAccepted, std::memory_order_relaxed ); });
sockaddr_storage saStorage = {0};
socklen_t uLength = sizeof(saStorage);
CSphRefcountedPtr<CSphNetLoop> pLoop { m_pNetLoop };
SafeAddRef ( m_pNetLoop );
while (true)
{
if ( g_iThrottleAccept && g_iThrottleAccept<iAccepted )
{
sphLogDebugv ( "%p accepted %d connections throttled", this, iAccepted );
return;
}
// accept
int iClientSock = accept ( m_tListener.m_iSock, (struct sockaddr *)&saStorage, &uLength );
// handle failures and no more incoming clients
if ( iClientSock<0 )
{
const int iErrno = sphSockGetErrno();
if ( iErrno==EINTR || iErrno==ECONNABORTED || iErrno==EAGAIN || iErrno==EWOULDBLOCK )
{
if ( iAccepted )
sphLogDebugv ( "%p accepted %d connections all, tick=%u", this, iAccepted, myinfo::ref<ListenTaskInfo_t> ()->m_uTick );
return;
}
if ( iErrno==EMFILE || iErrno==ENFILE )
{
sphWarning ( "accept() failed, raise ulimit -n and restart searchd: %s", sphSockError(iErrno) );
return;
}
sphFatal ( "accept() failed: %s", sphSockError(iErrno) );
}
if ( sphSetSockNB ( iClientSock )<0 )
{
sphWarning ( "sphSetSockNB() failed: %s", sphSockError() );
sphSockClose ( iClientSock );
return;
}
if ( g_bMaintenance && !m_tListener.m_bVIP )
{
sphWarning ( "server is in maintenance mode: refusing connection" );
sphSockClose ( iClientSock );
return;
}
++iAccepted;
int iConnID = NextConnectionID();
/*
* Modes of execution:
* - usual: default scheduler + non-null netloop. Polling is performed by the netloop; work is done by the thread pool.
* - vip: dedicated scheduler and null netloop. All work (both polling and calculations) is performed by a dedicated standalone thread.
*/
auto pClientNetLoop = pLoop;
using SchedulerFabric_fn = std::function<Threads::Scheduler_i*( void )>;
SchedulerFabric_fn fnMakeScheduler = nullptr;
if ( m_tListener.m_bVIP )
{
pClientNetLoop = nullptr;
// fixme! for now pass -1, which means 'no limit on the number of workers'. Maybe we need to obey max_children here.
fnMakeScheduler = [] { sphLogDebugv ( "-~-~-~-~-~-~-~-~ Alone sched created -~-~-~-~-~-~-~-~" ); return MakeSingleThreadExecutor ( -1 ); };
} else
{
fnMakeScheduler = [] { sphLogDebugv ( "-~-~-~-~-~-~-~-~ MT sched created -~-~-~-~-~-~-~-~" ); return GlobalWorkPool (); };
}
char szClientName[SPH_ADDRPORT_SIZE];
FormatClientAddress ( szClientName, saStorage );
auto pClientInfo = std::make_unique<ClientTaskInfo_t>();
pClientInfo->SetClientName ( szClientName );
pClientInfo->SetConnID ( iConnID );
pClientInfo->SetSocket ( iClientSock );
pClientInfo->SetVip ( m_tListener.m_bVIP );
pClientInfo->SetReadOnly( m_tListener.m_bReadOnly );
NetConnection_t tConn = { iClientSock, saStorage.ss_family };
auto pBuf = MakeAsyncNetBuffer ( std::make_unique<SockWrapper_c> ( iClientSock, pClientNetLoop ) );
auto eProto = m_tListener.m_eProto;
switch ( eProto )
{
case Proto_e::SPHINX:
case Proto_e::SPHINXSE:
case Proto_e::HTTPS:
case Proto_e::HTTP :
case Proto_e::MYSQL41:
{
Threads::Coro::Go ( [pRawBuf = pBuf.release(), tConn, _pInfo = pClientInfo.release(), eProto]() mutable
{
ScopedClientInfo_c pInfo { _pInfo }; // make visible task info
MultiServe ( std::unique_ptr<AsyncNetBuffer_c> ( pRawBuf ), tConn, eProto );
}, fnMakeScheduler () );
break;
}
case Proto_e::REPLICATION:
assert (false && "replication must be processed on another level");
break;
default:
break;
}
sphLogDebugv ( "%p accepted %s, sock=%d, tick=%u", this,
RelaxedProtoName(m_tListener.m_eProto), iClientSock, myinfo::ref<ListenTaskInfo_t> ()->m_uTick );
}
}
NetActionAccept_c::NetActionAccept_c ( const Listener_t & tListener, CSphNetLoop* pNetLoop )
: ISphNetAction ( tListener.m_iSock )
, m_pImpl ( std::make_unique<Impl_c> ( tListener, pNetLoop ) )
{
m_uIOChange = NetPollEvent_t::SET_READ;
}
NetActionAccept_c::~NetActionAccept_c () = default;
void NetActionAccept_c::Process ()
{
if ( !CheckSocketError() )
m_pImpl->ProcessAccept();
}
void NetActionAccept_c::NetLoopDestroying()
{
Release();
}
[end of record 16,900: net_action_accept.cpp - 9,861 bytes, C++ (.cpp), 300 lines, manticoresoftware/manticoresearch, GPL-3.0, extracted 9/20/2024]
[record 16,901: searchd.cpp - manticoresoftware_manticoresearch/src/searchd.cpp]
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxutils.h"
#include "fileutils.h"
#include "sphinxexcerpt.h"
#include "sphinxrt.h"
#include "sphinxpq.h"
#include "sphinxint.h"
#include "sphinxquery.h"
#include "sphinxsort.h"
#include "sphinxjson.h"
#include "sphinxjsonquery.h"
#include "sphinxplugin.h"
#include "sphinxqcache.h"
#include "accumulator.h"
#include "searchdaemon.h"
#include "searchdha.h"
#include "searchdreplication.h"
#include "replication/api_command_cluster.h"
#include "threadutils.h"
#include "searchdtask.h"
#include "global_idf.h"
#include "docstore.h"
#include "searchdssl.h"
#include "searchdexpr.h"
#include "indexsettings.h"
#include "searchdddl.h"
#include "networking_daemon.h"
#include "query_status.h"
#include "debug_cmds.h"
#include "stackmock.h"
#include "binlog.h"
#include "indexfiles.h"
#include "digest_sha1.h"
#include "tokenizer/charset_definition_parser.h"
#include "client_session.h"
#include "sphinx_alter.h"
#include "docs_collector.h"
#include "index_rotator.h"
#include "config_reloader.h"
#include "secondarylib.h"
#include "knnlib.h"
#include "task_dispatcher.h"
#include "tracer.h"
#include "netfetch.h"
#include "queryfilter.h"
#include "datetime.h"
#include "exprdatetime.h"
#include "pseudosharding.h"
#include "geodist.h"
#include "joinsorter.h"
#include "schematransform.h"
#include "frontendschema.h"
#include "skip_cache.h"
#include "jieba.h"
// services
#include "taskping.h"
#include "taskmalloctrim.h"
#include "taskglobalidf.h"
#include "tasksavestate.h"
#include "taskflushbinlog.h"
#include "taskflushattrs.h"
#include "taskflushmutable.h"
#include "taskpreread.h"
#include "coroutine.h"
#include "dynamic_idx.h"
#include "searchdbuddy.h"
#include "detail/indexlink.h"
#include "detail/expmeter.h"
extern "C"
{
#include "sphinxudf.h"
}
#include <csignal>
#include <clocale>
#include <cmath>
#include <ctime>
#define SEARCHD_BACKLOG 5
// don't shutdown on SIGKILL (debug purposes)
// 1 - SIGKILL will shut down the whole daemon; 0 - watchdog will reincarnate the daemon
#define WATCHDOG_SIGKILL 1
/////////////////////////////////////////////////////////////////////////////
#if _WIN32
// Win-specific headers and calls
#include <io.h>
#else
// UNIX-specific headers and calls
#include <sys/wait.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
#if USE_SYSLOG
#include <syslog.h>
#endif
#if HAVE_GETRLIMIT & HAVE_SETRLIMIT
#include <sys/resource.h>
#endif
/////////////////////////////////////////////////////////////////////////////
using namespace Threads;
static bool g_bService = false;
#if _WIN32
static bool g_bServiceStop = false;
static const char * g_sServiceName = "searchd";
static HANDLE g_hPipe = INVALID_HANDLE_VALUE;
#endif
static StrVec_t g_dArgs;
enum LogFormat_e
{
LOG_FORMAT_PLAIN,
LOG_FORMAT_SPHINXQL
};
#define LOG_COMPACT_IN 128 // up to this many IN(..) values allowed in query_log
static int g_iLogFile = STDOUT_FILENO; // log file descriptor
static auto& g_iParentPID = getParentPID (); // set by watchdog
static bool g_bLogSyslog = false;
static bool g_bQuerySyslog = false;
static CSphString g_sLogFile; // log file name
static bool g_bLogTty = false; // cached isatty(g_iLogFile)
static bool g_bLogStdout = true; // extra copy of startup log messages to stdout; true until around "accepting connections", then MUST be false
static LogFormat_e g_eLogFormat = LOG_FORMAT_SPHINXQL;
static bool g_bLogCompactIn = false; // whether to cut list in IN() clauses.
static int g_iQueryLogMinMs = 0; // log 'slow' threshold for query
static char g_sLogFilter[SPH_MAX_FILENAME_LEN+1] = "\0";
static int g_iLogFilterLen = 0;
static int g_iLogFileMode = 0;
static CSphBitvec g_tLogStatements;
int g_iReadTimeoutS = 5; // sec
int g_iWriteTimeoutS = 5; // sec
bool g_bTimeoutEachPacket = true;
int g_iClientTimeoutS = 300;
int g_iClientQlTimeoutS = 900; // sec
static int g_iMaxConnection = 0; // unlimited
static int g_iThreads; // defined in config, or =cpu cores
static bool g_bWatchdog = true;
static int g_iExpansionLimit = 0;
static int g_iShutdownTimeoutUs = 3000000; // default timeout on daemon shutdown and stopwait is 3 seconds
static int g_iBacklog = SEARCHD_BACKLOG;
static int g_iThdQueueMax = 0;
static auto& g_iTFO = sphGetTFO ();
static int g_iServerID = 0;
static bool g_bServerID = false;
static bool g_bJsonConfigLoadedOk = false;
static auto& g_iAutoOptimizeCutoffMultiplier = AutoOptimizeCutoffMultiplier();
static constexpr bool AUTOOPTIMIZE_NEEDS_VIP = false; // whether non-VIP can issue 'SET GLOBAL auto_optimize = X'
static constexpr bool THREAD_EX_NEEDS_VIP = false; // same, but for changing the threads_ex dispatcher setting
static CSphVector<Listener_t> g_dListeners;
static int g_iQueryLogFile = -1;
static CSphString g_sQueryLogFile;
static CSphString g_sPidFile;
static bool g_bPidIsMine = false; // if PID is not mine, don't unlink it on fail
static int g_iPidFD = -1;
static int g_iMaxCachedDocs = 0; // in bytes
static int g_iMaxCachedHits = 0; // in bytes
int g_iMaxPacketSize = 128*1024*1024; // in bytes; for both query packets from clients and response packets from agents
static int g_iMaxFilters = 256;
static int g_iMaxFilterValues = 4096;
static int g_iMaxBatchQueries = 32;
static int64_t g_iDocstoreCache = 0;
static int64_t g_iSkipCache = 0;
static auto & g_iDistThreads = getDistThreads();
const int DAEMON_DEFAULT_CONNECT_TIMEOUT = 1000;
const int DAEMON_DEFAULT_QUERY_TIMEOUT = 3000;
const int DAEMON_MAX_RETRY_COUNT = 8;
const int DAEMON_MAX_RETRY_DELAY = 1000;
int g_iAgentConnectTimeoutMs = DAEMON_DEFAULT_CONNECT_TIMEOUT;
int g_iAgentQueryTimeoutMs = DAEMON_DEFAULT_QUERY_TIMEOUT; // global (default); may be overridden by index-scope values, if specified
int g_iAgentRetryCount = 0;
int g_iAgentRetryDelayMs = DAEMON_MAX_RETRY_DELAY/2; // global (default) value; may be overridden by the query options 'retry_count' and 'retry_timeout'
static int g_iReplConnectTimeoutMs = DAEMON_DEFAULT_CONNECT_TIMEOUT;
static int g_iReplQueryTimeoutMs = DAEMON_DEFAULT_QUERY_TIMEOUT;
static int g_iReplRetryCount = 3;
static int g_iReplRetryDelayMs = DAEMON_MAX_RETRY_DELAY/2;
bool g_bHostnameLookup = false;
CSphString g_sMySQLVersion = szMANTICORE_VERSION;
CSphString g_sDbName = "Manticore";
CSphString g_sBannerVersion { szMANTICORE_NAME };
CSphString g_sBanner;
CSphString g_sStatusVersion = szMANTICORE_VERSION;
CSphString g_sSecondaryError;
static CSphString g_sBuddyPath;
static bool g_bTelemetry = val_from_env ( "MANTICORE_TELEMETRY", true );
static bool g_bHasBuddyPath = false;
static bool g_bAutoSchema = true;
static bool g_bNoChangeCwd = val_from_env ( "MANTICORE_NO_CHANGE_CWD", false );
static bool g_bCwdChanged = false;
// for CLang thread-safety analysis
ThreadRole MainThread; // functions which called only from main thread
ThreadRole HandlerThread; // thread which serves clients
//////////////////////////////////////////////////////////////////////////
static CSphString g_sConfigFile;
static bool LOG_LEVEL_SHUTDOWN = val_from_env("MANTICORE_TRACK_DAEMON_SHUTDOWN",false); // verbose logging during daemon shutdown, controlled by this env variable
static CSphString g_sConfigPath; // for resolve paths to absolute
static CSphString g_sExePath;
static auto& g_bSeamlessRotate = sphGetSeamlessRotate ();
static bool g_bIOStats = false;
static auto& g_bCpuStats = sphGetbCpuStat ();
static bool g_bOptNoDetach = false; // whether to detach from the console, or work in the foreground
static bool g_bOptNoLock = false; // whether to lock indexes (with .spl) or not
static bool g_bSafeTrace = false;
static bool g_bStripPath = false;
static bool g_bCoreDump = false;
static bool LOG_LEVEL_LOCAL_SEARCH = val_from_env ( "MANTICORE_LOG_LOCAL_SEARCH", false ); // verbose logging of local search events, controlled by this env variable
#define LOG_COMPONENT_LOCSEARCHINFO __LINE__ << " "
#define LOCSEARCHINFO LOGINFO ( LOCAL_SEARCH, LOCSEARCHINFO )
static auto& g_bGotSighup = sphGetGotSighup(); // we just received SIGHUP; need to log
static auto& g_bGotSigusr1 = sphGetGotSigusr1(); // we just received SIGUSR1; need to reopen logs
static auto& g_bGotSigusr2 = sphGetGotSigusr2(); // we just received SIGUSR2; need to dump daemon's bt
// pipe to watchdog to inform that daemon is going to close, so no need to restart it in case of crash
struct SharedData_t
{
bool m_bDaemonAtShutdown;
bool m_bHaveTTY;
};
static SharedData_t* g_pShared = nullptr;
volatile bool g_bMaintenance = false;
std::unique_ptr<ReadOnlyServedHash_c> g_pLocalIndexes = std::make_unique<ReadOnlyServedHash_c>(); // served (local) indexes hash
std::unique_ptr<ReadOnlyDistrHash_c> g_pDistIndexes = std::make_unique<ReadOnlyDistrHash_c>(); // distributed indexes hash
// this is internal deal of the daemon; don't expose it outside!
// fixme! move all this stuff to dedicated file.
static RwLock_t g_tRotateConfigMutex;
static CSphConfig g_hCfg GUARDED_BY ( g_tRotateConfigMutex );
static volatile bool g_bNeedRotate = false; // true if there were pending HUPs to handle (they could fly in during previous rotate)
static volatile bool g_bInRotate = false; // true while we are rotating
static volatile bool g_bReloadForced = false; // true in case reload issued via SphinxQL
static WorkerSharedPtr_t g_pTickPoolThread;
static CSphVector<CSphNetLoop*> g_dNetLoops;
constexpr int g_iExpMeterPeriod = 5000000; // once per 5s
static ExpMeter_c g_tStat1m { 12 }; // once a minute (12 * 5s)
static ExpMeter_c g_tStat5m { 12*5 }; // once a 5 minutes
static ExpMeter_c g_tStat15m { 12*15 }; // once a 15 minutes
static ExpMeter_c g_tPriStat1m { 12 }; // once a minute (12 * 5s)
static ExpMeter_c g_tPriStat5m { 12*5 }; // once a 5 minutes
static ExpMeter_c g_tPriStat15m { 12*15 }; // once a 15 minutes
static ExpMeter_c g_tSecStat1m { 12 }; // once a minute (12 * 5s)
static ExpMeter_c g_tSecStat5m { 12*5 }; // once a 5 minutes
static ExpMeter_c g_tSecStat15m { 12*15 }; // once a 15 minutes
int64_t g_iNextExpMeterTimestamp = sphMicroTimer() + g_iExpMeterPeriod;
static CSphString g_sClusterUser { "cluster" }; // user with this name will see cluster:table in show tables
/// command names
static const char * g_dApiCommands[] =
{
"search", "excerpt", "update", "keywords", "persist", "status", "query", "flushattrs", "query", "ping", "delete", "set", "insert", "replace", "commit", "suggest", "json",
"callpq", "clusterpq", "getfield"
};
STATIC_ASSERT ( sizeof(g_dApiCommands)/sizeof(g_dApiCommands[0])==SEARCHD_COMMAND_TOTAL, SEARCHD_COMMAND_SHOULD_BE_SAME_AS_SEARCHD_COMMAND_TOTAL );
//////////////////////////////////////////////////////////////////////////
const char * sAgentStatsNames[eMaxAgentStat+ehMaxStat]=
{ "query_timeouts", "connect_timeouts", "connect_failures",
"network_errors", "wrong_replies", "unexpected_closings",
"warnings", "succeeded_queries", "total_query_time",
"connect_count", "connect_avg", "connect_max" };
static RwLock_t g_tLastMetaLock;
static CSphQueryResultMeta g_tLastMeta GUARDED_BY ( g_tLastMetaLock );
/////////////////////////////////////////////////////////////////////////////
// MISC
/////////////////////////////////////////////////////////////////////////////
static void ReleaseTTYFlag()
{
if ( g_pShared )
g_pShared->m_bHaveTTY = true;
}
/////////////////////////////////////////////////////////////////////////////
// LOGGING
/////////////////////////////////////////////////////////////////////////////
/// physically emit log entry
/// buffer must have 1 extra byte for linefeed
#if _WIN32
static void sphLogEntry ( ESphLogLevel eLevel, char * sBuf, char * sTtyBuf )
#else
static void sphLogEntry ( ESphLogLevel , char * sBuf, char * sTtyBuf )
#endif
{
#if _WIN32
if ( g_bService && g_iLogFile==STDOUT_FILENO )
{
HANDLE hEventSource;
LPCTSTR lpszStrings[2];
hEventSource = RegisterEventSource ( NULL, g_sServiceName );
if ( hEventSource )
{
lpszStrings[0] = g_sServiceName;
lpszStrings[1] = sBuf;
WORD eType;
switch ( eLevel )
{
case SPH_LOG_FATAL: eType = EVENTLOG_ERROR_TYPE; break;
case SPH_LOG_WARNING: eType = EVENTLOG_WARNING_TYPE; break;
case SPH_LOG_INFO: eType = EVENTLOG_INFORMATION_TYPE; break;
default: eType = EVENTLOG_INFORMATION_TYPE; break;
}
ReportEvent ( hEventSource, // event log handle
eType, // event type
0, // event category
0, // event identifier
NULL, // no security identifier
2, // size of lpszStrings array
0, // no binary data
lpszStrings, // array of strings
NULL ); // no binary data
DeregisterEventSource ( hEventSource );
}
} else
#endif
{
strcat ( sBuf, "\n" ); // NOLINT
sphSeek ( g_iLogFile, 0, SEEK_END );
if ( g_bLogTty )
{
memmove ( sBuf+20, sBuf+15, 9);
sTtyBuf = sBuf + 19;
*sTtyBuf = '[';
sphWrite ( g_iLogFile, sTtyBuf, strlen(sTtyBuf) );
}
else
sphWrite ( g_iLogFile, sBuf, strlen(sBuf) );
if ( g_bLogStdout && g_iLogFile!=STDOUT_FILENO )
sphWrite ( STDOUT_FILENO, sTtyBuf, strlen(sTtyBuf) );
}
}
const int LOG_BUF_SIZE = 1024;
int GetDaemonLogBufSize ()
{
return LOG_BUF_SIZE;
}
/// log entry (with log levels, dupe catching, etc)
/// call with NULL format for dupe flushing
void sphLog ( ESphLogLevel eLevel, const char * sFmt, va_list ap )
{
// dupe catcher state
static const int FLUSH_THRESH_TIME = 1000000; // in microseconds
static const int FLUSH_THRESH_COUNT = 100;
static ESphLogLevel eLastLevel = SPH_LOG_INFO;
static DWORD uLastEntry = 0;
static int64_t tmLastStamp = -1000000-FLUSH_THRESH_TIME;
static int iLastRepeats = 0;
// only if we can
if ( sFmt && eLevel>g_eLogLevel )
return;
#if USE_SYSLOG
if ( g_bLogSyslog && sFmt )
{
const int levels[SPH_LOG_MAX+1] = { LOG_EMERG, LOG_WARNING, LOG_INFO, LOG_DEBUG, LOG_DEBUG, LOG_DEBUG, LOG_DEBUG };
vsyslog ( levels[eLevel], sFmt, ap );
return;
}
#endif
if ( g_iLogFile<0 && !g_bService )
return;
// format the banner
char sTimeBuf[128];
sphFormatCurrentTime ( sTimeBuf, sizeof(sTimeBuf) );
const char * sBanner = "";
if ( sFmt==NULL ) eLevel = eLastLevel;
if ( eLevel==SPH_LOG_FATAL ) sBanner = "FATAL: ";
if ( eLevel==SPH_LOG_WARNING ) sBanner = "WARNING: ";
if ( eLevel>=SPH_LOG_DEBUG ) sBanner = "DEBUG: ";
if ( eLevel==SPH_LOG_RPL_DEBUG ) sBanner = "RPL: ";
char sBuf [ LOG_BUF_SIZE ];
snprintf ( sBuf, sizeof(sBuf)-1, "[%s] [%d] ", sTimeBuf, GetOsThreadId() );
char * sTtyBuf = sBuf + strlen(sBuf);
strncpy ( sTtyBuf, sBanner, 32 ); // 32 is arbitrary; just something that is enough and keeps lint happy
auto iLen = (int) strlen(sBuf);
// format the message
if ( sFmt )
{
// need more space for tail zero and "\n" that added at sphLogEntry
int iSafeGap = 4;
int iBufSize = sizeof(sBuf)-iLen-iSafeGap;
vsnprintf ( sBuf+iLen, iBufSize, sFmt, ap );
sBuf[ sizeof(sBuf)-iSafeGap ] = '\0';
}
if ( sFmt && eLevel>SPH_LOG_INFO && g_iLogFilterLen )
{
if ( strncmp ( sBuf+iLen, g_sLogFilter, g_iLogFilterLen )!=0 )
return;
}
// catch dupes
DWORD uEntry = sFmt ? sphCRC32 ( sBuf+iLen ) : 0;
int64_t tmNow = sphMicroTimer();
// accumulate while possible
if ( sFmt && eLevel==eLastLevel && uEntry==uLastEntry && iLastRepeats<FLUSH_THRESH_COUNT && tmNow<tmLastStamp+FLUSH_THRESH_TIME )
{
tmLastStamp = tmNow;
iLastRepeats++;
return;
}
// flush if needed
if ( iLastRepeats!=0 && ( sFmt || tmNow>=tmLastStamp+FLUSH_THRESH_TIME ) )
{
// flush if we actually have something to flush, and
// case 1: got a message we can't accumulate
// case 2: got a periodic flush and been otherwise idle for a thresh period
char sLast[256];
iLen = Min ( iLen, (int) sizeof(sLast)-1 ); // leave room for the terminating zero
strncpy ( sLast, sBuf, iLen );
sLast[iLen] = '\0';
if ( iLen < (int) sizeof(sLast)-1 )
snprintf ( sLast+iLen, sizeof(sLast)-iLen, "last message repeated %d times", iLastRepeats );
sphLogEntry ( eLastLevel, sLast, sLast + ( sTtyBuf-sBuf ) );
tmLastStamp = tmNow;
iLastRepeats = 0;
eLastLevel = SPH_LOG_INFO;
uLastEntry = 0;
}
// was that a flush-only call?
if ( !sFmt )
return;
tmLastStamp = tmNow;
iLastRepeats = 0;
eLastLevel = eLevel;
uLastEntry = uEntry;
// do the logging
sphLogEntry ( eLevel, sBuf, sTtyBuf );
}
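// The dupe catcher above keys on a CRC32 of the formatted message tail: identical
// consecutive messages only bump a counter until a flush emits "last message repeated
// N times". A minimal standalone sketch of that idea, simplified to a string compare
// and no timing threshold (illustrative only):
#include <cstdio>
#include <cstring>
static void LogDeduped ( const char * szMsg )
{
	static char s_szLast[256] = "";
	static int s_iRepeats = 0;
	if ( strncmp ( szMsg, s_szLast, sizeof(s_szLast)-1 )==0 )
	{
		++s_iRepeats; // accumulate instead of emitting a duplicate line
		return;
	}
	if ( s_iRepeats>0 )
		printf ( "last message repeated %d times\n", s_iRepeats );
	s_iRepeats = 0;
	strncpy ( s_szLast, szMsg, sizeof(s_szLast)-1 );
	s_szLast[sizeof(s_szLast)-1] = '\0';
	printf ( "%s\n", szMsg );
}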
void Shutdown (); // forward
bool DieOrFatalWithShutdownCb ( bool bDie, const char * sFmt, va_list ap )
{
if ( bDie )
g_pLogger () ( SPH_LOG_FATAL, sFmt, ap );
else
Shutdown ();
return false; // don't log to stdout
}
bool DieOrFatalCb ( bool bDie, const char * sFmt, va_list ap )
{
if ( bDie )
g_pLogger () ( SPH_LOG_FATAL, sFmt, ap );
return false; // don't log to stdout
}
#if !_WIN32
static CSphString GetNamedPipeName ( int iPid )
{
CSphString sRes;
sRes.SetSprintf ( "/tmp/searchd_%d", iPid );
return sRes;
}
#endif
void LogChangeMode ( int iFile, int iMode )
{
if ( iFile<0 || iMode==0 || iFile==STDOUT_FILENO || iFile==STDERR_FILENO )
return;
#if !_WIN32
fchmod ( iFile, iMode );
#endif
}
/////////////////////////////////////////////////////////////////////////////
static int CmpString ( const CSphString & a, const CSphString & b )
{
if ( !a.cstr() && !b.cstr() )
return 0;
if ( !a.cstr() || !b.cstr() )
return a.cstr() ? -1 : 1;
return strcmp ( a.cstr(), b.cstr() );
}
struct SearchFailure_t
{
CSphString m_sParentIndex;
CSphString m_sIndex; ///< searched index name
CSphString m_sError; ///< search error message
bool operator == ( const SearchFailure_t & r ) const
{
return m_sIndex==r.m_sIndex && m_sError==r.m_sError && m_sParentIndex==r.m_sParentIndex;
}
bool operator < ( const SearchFailure_t & r ) const
{
int iRes = CmpString ( m_sError.cstr(), r.m_sError.cstr() );
if ( !iRes )
iRes = CmpString ( m_sParentIndex.cstr (), r.m_sParentIndex.cstr () );
if ( !iRes )
iRes = CmpString ( m_sIndex.cstr(), r.m_sIndex.cstr() );
return iRes<0;
}
SearchFailure_t & operator = ( const SearchFailure_t & r )
{
if ( this!=&r )
{
m_sParentIndex = r.m_sParentIndex;
m_sIndex = r.m_sIndex;
m_sError = r.m_sError;
}
return *this;
}
};
static void ReportIndexesName ( int iSpanStart, int iSpandEnd, const CSphVector<SearchFailure_t> & dLog, StringBuilder_c & sOut );
class SearchFailuresLog_c
{
CSphVector<SearchFailure_t> m_dLog;
public:
void Submit ( const CSphString& sIndex, const char * sParentIndex , const char * sError )
{
SearchFailure_t & tEntry = m_dLog.Add ();
tEntry.m_sParentIndex = sParentIndex;
tEntry.m_sIndex = sIndex;
tEntry.m_sError = sError;
}
void SubmitVa ( const char * sIndex, const char * sParentIndex, const char * sTemplate, va_list ap )
{
StringBuilder_c tError;
tError.vAppendf ( sTemplate, ap );
SearchFailure_t &tEntry = m_dLog.Add ();
tEntry.m_sParentIndex = sParentIndex;
tEntry.m_sIndex = sIndex;
tError.MoveTo ( tEntry.m_sError );
}
inline void Append ( const SearchFailuresLog_c& rhs )
{
m_dLog.Append ( rhs.m_dLog );
}
void SubmitEx ( const char * sIndex, const char * sParentIndex, const char * sTemplate, ... ) __attribute__ ( ( format ( printf, 4, 5 ) ) )
{
va_list ap;
va_start ( ap, sTemplate );
SubmitVa ( sIndex, sParentIndex, sTemplate, ap);
va_end ( ap );
}
void SubmitEx ( const CSphString &sIndex, const char * sParentIndex, const char * sTemplate, ... ) __attribute__ ( ( format ( printf, 4, 5 ) ) )
{
va_list ap;
va_start ( ap, sTemplate );
SubmitVa ( sIndex.cstr(), sParentIndex, sTemplate, ap );
va_end ( ap );
}
bool IsEmpty ()
{
return m_dLog.GetLength()==0;
}
int GetReportsCount()
{
return m_dLog.GetLength();
}
void BuildReport ( StringBuilder_c & sReport )
{
if ( IsEmpty() )
return;
// collapse same messages
m_dLog.Uniq ();
int iSpanStart = 0;
Comma_c sColon( { ";\n", 2 } );
for ( int i=1; i<=m_dLog.GetLength(); ++i )
{
// keep scanning while error text is the same
if ( i!=m_dLog.GetLength() )
if ( m_dLog[i].m_sError==m_dLog[i-1].m_sError )
continue;
if ( m_dLog[iSpanStart].m_sError.IsEmpty() )
continue;
sReport << sColon;
ReportIndexesName ( iSpanStart, i, m_dLog, sReport );
sReport << m_dLog[iSpanStart].m_sError;
// done
iSpanStart = i;
}
}
};
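// A minimal usage sketch of the failure log above: identical error texts collapse into
// one span listing the affected indexes. The index names here are hypothetical, and the
// resulting report text is only approximate (illustrative only):
static void FailuresReportDemo()
{
	SearchFailuresLog_c tLog;
	tLog.Submit ( "idx1", nullptr, "index not found" );
	tLog.Submit ( "idx2", nullptr, "index not found" ); // same error text as idx1
	tLog.SubmitEx ( "idx3", nullptr, "query timed out after %d ms", 500 );
	StringBuilder_c sReport;
	tLog.BuildReport ( sReport );
	// sReport now holds something like:
	// "index idx1,idx2: index not found;\nindex idx3: query timed out after 500 ms"
}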
#define LOG_COMPONENT_SEARCHD __LINE__ << " "
#define SHUTINFO LOGINFO (SHUTDOWN,SEARCHD)
/////////////////////////////////////////////////////////////////////////////
// SIGNAL HANDLERS
/////////////////////////////////////////////////////////////////////////////
void Shutdown () REQUIRES ( MainThread ) NO_THREAD_SAFETY_ANALYSIS
{
// force even long time searches to shut
SHUTINFO << "Trigger g_bInterruptNow ...";
sphInterruptNow ();
SHUTINFO << "Shutdown curl query subsystem ...";
ShutdownCurl();
#if !_WIN32
int fdStopwait = -1;
#endif
bool bAttrsSaveOk = true;
if ( g_pShared )
g_pShared->m_bDaemonAtShutdown = true;
#if !_WIN32
// stopwait handshake
CSphString sPipeName = GetNamedPipeName ( getpid() );
fdStopwait = ::open ( sPipeName.cstr(), O_WRONLY | O_NONBLOCK );
if ( fdStopwait>=0 )
{
DWORD uHandshakeOk = 0;
int VARIABLE_IS_NOT_USED iDummy = ::write ( fdStopwait, &uHandshakeOk, sizeof(DWORD) );
}
#endif
int64_t tmShutStarted = sphMicroTimer ();
// release all planned/scheduled tasks
SHUTINFO << "Shut down mini timer ...";
sph::ShutdownMiniTimer();
SHUTINFO << "Shut down flushing mutable ...";
ShutdownFlushingMutable();
// stop search threads; up to shutdown_timeout seconds
SHUTINFO << "Wait preread (if any) finished ...";
WaitPrereadFinished ( g_iShutdownTimeoutUs );
// save attribute updates for all local indexes
SHUTINFO << "Finally save tables ...";
bAttrsSaveOk = FinallySaveIndexes();
// right before unlock loop
if ( g_bJsonConfigLoadedOk )
{
CSphString sError;
SHUTINFO << "Save json config ...";
SaveConfigInt(sError);
}
// stop netloop processing
SHUTINFO << "Stop netloop processing ...";
for ( auto & pNetLoop : g_dNetLoops )
{
pNetLoop->StopNetLoop ();
SafeRelease ( pNetLoop );
}
// stop netloop threads
SHUTINFO << "Stop netloop pool ...";
if ( g_pTickPoolThread )
g_pTickPoolThread->StopAll ();
// call scheduled callbacks:
// shutdown replication,
// shutdown ssl,
// shutdown tick threads,
SHUTINFO << "Invoke shutdown callbacks ...";
searchd::FireShutdownCbs ();
SHUTINFO << "Waiting clients to finish ... (" << myinfo::CountClients() << ")";
while ( ( myinfo::CountClients ()>0 ) && ( sphMicroTimer ()-tmShutStarted )<g_iShutdownTimeoutUs )
sphSleepMsec ( 50 );
if ( myinfo::CountClients ()>0 )
{
int64_t tmDelta = sphMicroTimer ()-tmShutStarted;
sphWarning ( "still %d alive tasks during shutdown, after %d.%03d sec", myinfo::CountClients (), (int) ( tmDelta
/ 1000000 ), (int) ( ( tmDelta / 1000 ) % 1000 ) );
}
// unlock indexes and release locks if needed
SHUTINFO << "Unlock tables ...";
{
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto& tIt : *hLocal )
RWIdx_c ( tIt.second )->Unlock();
}
Threads::CallCoroutine ( [] {
SHUTINFO << "Abandon local tables list ...";
g_pLocalIndexes->ReleaseAndClear();
// unlock Distr indexes automatically done by d-tr
SHUTINFO << "Abandon distr tables list ...";
g_pDistIndexes->ReleaseAndClear();
} );
SHUTINFO << "Shutdown alone threads (if any) ...";
Detached::ShutdownAllAlones();
SHUTINFO << "Shutdown main work pool ...";
StopGlobalWorkPool();
SHUTINFO << "Remove local tables list ...";
g_pLocalIndexes.reset();
SHUTINFO << "Remove distr tables list ...";
g_pDistIndexes.reset();
// clear shut down of rt indexes + binlog
SHUTINFO << "Finish IO stats collecting ...";
sphDoneIOStats();
SHUTINFO << "Finish binlog serving ...";
Binlog::Deinit();
SHUTINFO << "Shutdown docstore ...";
ShutdownDocstore();
SHUTINFO << "Shutdown skip cache ...";
ShutdownSkipCache();
SHUTINFO << "Shutdown global IDFs ...";
sph::ShutdownGlobalIDFs ();
SHUTINFO << "Shutdown aot ...";
sphAotShutdown ();
SHUTINFO << "Shutdown columnar ...";
ShutdownColumnar();
SHUTINFO << "Shutdown secondary ...";
ShutdownSecondary();
SHUTINFO << "Shutdown knn ...";
ShutdownKNN();
SHUTINFO << "Shutdown listeners ...";
for ( auto& dListener : g_dListeners )
if ( dListener.m_iSock>=0 )
sphSockClose ( dListener.m_iSock );
SHUTINFO << "Close persistent sockets ...";
ClosePersistentSockets();
// close pid
SHUTINFO << "Release (close) pid file ...";
if ( g_iPidFD!=-1 )
::close ( g_iPidFD );
g_iPidFD = -1;
// remove pid file, if we owned it
if ( g_bPidIsMine && !g_sPidFile.IsEmpty() )
::unlink ( g_sPidFile.cstr() );
SHUTINFO << "Shutdown hazard pointers ...";
hazard::Shutdown ();
// wordforms may still be referenced from an accumulator (rt-index), which, in turn, is part of a client session,
// so shutting them down any earlier would probably fail.
// after the hazard shutdown all sessions are surely done, so wordforms are safe to destroy at this point.
SHUTINFO << "Shutdown wordforms ...";
sphShutdownWordforms();
sphInfo ( "shutdown daemon version '%s' ...", g_sStatusVersion.cstr() );
sphInfo ( "shutdown complete" );
Threads::Done ( g_iLogFile );
#if _WIN32
CloseHandle ( g_hPipe );
#else
if ( fdStopwait>=0 )
{
DWORD uStatus = bAttrsSaveOk;
int VARIABLE_IS_NOT_USED iDummy = ::write ( fdStopwait, &uStatus, sizeof(DWORD) );
::close ( fdStopwait );
}
#endif
}
void sighup ( int )
{
g_bGotSighup = 1;
}
static void sigterm ( int )
{
// tricky bit
// we can't call exit() here because malloc()/free() are not re-entrant
// we could call _exit() but let's try to die gracefully on TERM
// and let signal sender wait and send KILL as needed
sphInterruptNow();
}
static void sigusr1 ( int )
{
g_bGotSigusr1 = true;
}
static void sigusr2 ( int )
{
g_bGotSigusr2 = true;
}
struct QueryCopyState_t
{
BYTE * m_pDst;
BYTE * m_pDstEnd;
const BYTE * m_pSrc;
const BYTE * m_pSrcEnd;
};
// crash query handler
static const int g_iQueryLineLen = 80;
static const char g_dEncodeBase64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static bool sphCopyEncodedBase64 ( QueryCopyState_t & tEnc )
{
BYTE * pDst = tEnc.m_pDst;
const BYTE * pDstBase = tEnc.m_pDst;
const BYTE * pSrc = tEnc.m_pSrc;
const BYTE * pDstEnd = tEnc.m_pDstEnd-5;
const BYTE * pSrcEnd = tEnc.m_pSrcEnd-3;
while ( pDst<=pDstEnd && pSrc<=pSrcEnd )
{
// put line delimiter at max line length
if ( ( ( pDst-pDstBase ) % g_iQueryLineLen )>( ( pDst-pDstBase+4 ) % g_iQueryLineLen ) )
*pDst++ = '\n';
// Convert to big endian
DWORD uSrc = ( pSrc[0] << 16 ) | ( pSrc[1] << 8 ) | ( pSrc[2] );
pSrc += 3;
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x00FC0000 ) >> 18 ];
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x0003F000 ) >> 12 ];
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x00000FC0 ) >> 6 ];
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x0000003F ) ];
}
// there is a tail in the source data and room for it in the destination buffer
if ( pSrc<tEnc.m_pSrcEnd && ( tEnc.m_pSrcEnd-pSrc<3 ) && ( pDst<=pDstEnd-4 ) )
{
int iLeft = ( tEnc.m_pSrcEnd - pSrc ) % 3;
if ( iLeft==1 )
{
DWORD uSrc = pSrc[0]<<16;
pSrc += 1;
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x00FC0000 ) >> 18 ];
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x0003F000 ) >> 12 ];
*pDst++ = '=';
*pDst++ = '=';
} else if ( iLeft==2 )
{
DWORD uSrc = ( pSrc[0]<<16 ) | ( pSrc[1] << 8 );
pSrc += 2;
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x00FC0000 ) >> 18 ];
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x0003F000 ) >> 12 ];
*pDst++ = g_dEncodeBase64 [ ( uSrc & 0x00000FC0 ) >> 6 ];
*pDst++ = '=';
}
}
tEnc.m_pDst = pDst;
tEnc.m_pSrc = pSrc;
return ( tEnc.m_pSrc<tEnc.m_pSrcEnd );
}
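// A minimal standalone base64 encoder, equivalent in spirit to the resumable copier
// above but without line wrapping or chunked state (illustrative only):
#include <string>
static std::string EncodeBase64 ( const unsigned char * pSrc, size_t uLen )
{
	static const char szAlphabet[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
	std::string sOut;
	size_t i = 0;
	for ( ; i+3<=uLen; i+=3 ) // every full 3-byte group becomes 4 output chars
	{
		unsigned int u = ( pSrc[i]<<16 ) | ( pSrc[i+1]<<8 ) | pSrc[i+2];
		sOut += szAlphabet [ ( u>>18 ) & 0x3F ];
		sOut += szAlphabet [ ( u>>12 ) & 0x3F ];
		sOut += szAlphabet [ ( u>>6 ) & 0x3F ];
		sOut += szAlphabet [ u & 0x3F ];
	}
	size_t uTail = uLen-i; // 0, 1 or 2 leftover bytes, padded with '='
	if ( uTail==1 )
	{
		unsigned int u = pSrc[i]<<16;
		sOut += szAlphabet [ ( u>>18 ) & 0x3F ];
		sOut += szAlphabet [ ( u>>12 ) & 0x3F ];
		sOut += "==";
	} else if ( uTail==2 )
	{
		unsigned int u = ( pSrc[i]<<16 ) | ( pSrc[i+1]<<8 );
		sOut += szAlphabet [ ( u>>18 ) & 0x3F ];
		sOut += szAlphabet [ ( u>>12 ) & 0x3F ];
		sOut += szAlphabet [ ( u>>6 ) & 0x3F ];
		sOut += '=';
	}
	return sOut;
}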
static bool sphCopySphinxQL ( QueryCopyState_t & tState )
{
BYTE * pDst = tState.m_pDst;
const BYTE * pSrc = tState.m_pSrc;
BYTE * pNextLine = pDst+g_iQueryLineLen;
while ( pDst<tState.m_pDstEnd && pSrc<tState.m_pSrcEnd )
{
if ( pDst>pNextLine && pDst+1<tState.m_pDstEnd && ( sphIsSpace ( *pSrc ) || *pSrc==',' ) )
{
*pDst++ = *pSrc++;
*pDst++ = '\n';
pNextLine = pDst + g_iQueryLineLen;
} else
{
*pDst++ = *pSrc++;
}
}
tState.m_pDst = pDst;
tState.m_pSrc = pSrc;
return ( tState.m_pSrc<tState.m_pSrcEnd );
}
static bool sphCopySphinxHttp ( QueryCopyState_t & tState )
{
BYTE * pDst = tState.m_pDst;
const BYTE * pSrc = tState.m_pSrc;
while ( pDst<tState.m_pDstEnd && pSrc<tState.m_pSrcEnd )
{
*pDst++ = *pSrc++;
}
tState.m_pDst = pDst;
tState.m_pSrc = pSrc;
return ( tState.m_pSrc<tState.m_pSrcEnd );
}
typedef bool CopyQuery_fn ( QueryCopyState_t & tState );
#define SPH_TIME_PID_MAX_SIZE 256
const char g_sCrashedBannerAPI[] = "\n--- crashed SphinxAPI request dump ---\n";
const char g_sCrashedBannerMySQL[] = "\n--- crashed SphinxQL request dump ---\n";
const char g_sCrashedBannerHTTP[] = "\n--- crashed HTTP request dump ---\n";
const char g_sCrashedBannerBad[] = "\n--- crashed invalid query ---\n";
const char g_sCrashedBannerTail[] = "\n--- request dump end ---\n";
const char g_sCrashedIndex[] = "--- local index:";
const char g_sEndLine[] = "\n";
#if _WIN32
const char g_sMinidumpBanner[] = "minidump located at: ";
#endif
#if SPH_ALLOCS_PROFILER
const char g_sMemoryStatBanner[] = "\n--- memory statistics ---\n";
#endif
static BYTE g_dCrashQueryBuff [4096];
static char g_sCrashInfo [SPH_TIME_PID_MAX_SIZE] = "[][]\n";
static int g_iCrashInfoLen = 0;
#if _WIN32
static char g_sMinidump[SPH_TIME_PID_MAX_SIZE] = "";
#endif
#if !_WIN32
void CrashLogger::HandleCrash ( int sig ) NO_THREAD_SAFETY_ANALYSIS
#else
LONG WINAPI CrashLogger::HandleCrash ( EXCEPTION_POINTERS * pExc )
#endif // !_WIN32
{
sphSetDied();
if ( g_iLogFile<0 )
{
if ( g_bCoreDump )
{
CRASH_EXIT_CORE;
} else
{
CRASH_EXIT;
}
}
// log [time][pid]
sphSeek ( g_iLogFile, 0, SEEK_END );
sphWrite ( g_iLogFile, g_sCrashInfo, g_iCrashInfoLen );
// log query
auto& tQuery = GlobalCrashQueryGetRef ();
bool bValidQuery = IsFilled ( tQuery.m_dQuery );
#if !_WIN32
if ( bValidQuery )
{
size_t iPageSize = GetMemPageSize();
// FIXME! That is too complex a way; remove all of this and just move the query dump to the bottom.
// Also remove mincore_test.cmake, its invocation from CMakeLists.txt, and the HAVE_UNSIGNED_MINCORE
// declaration from config_cmake.h.in
#if HAVE_UNSIGNED_MINCORE
BYTE dPages = 0;
#else
char dPages = 0;
#endif
auto pPageStart = (uintptr_t) tQuery.m_dQuery.first;
pPageStart &= ~( iPageSize - 1 );
bValidQuery &= ( mincore ( ( void * ) pPageStart, 1, &dPages )==0 );
auto pPageEnd = (uintptr_t) ( tQuery.m_dQuery.first+tQuery.m_dQuery.second-1 );
pPageEnd &= ~( iPageSize - 1 );
bValidQuery &= ( mincore ( ( void * ) pPageEnd, 1, &dPages )==0 );
}
#endif
// request dump banner
Str_t dBanner = { g_sCrashedBannerAPI, sizeof ( g_sCrashedBannerAPI )-1 };
if ( tQuery.m_eType==QUERY_SQL )
dBanner = { g_sCrashedBannerMySQL, sizeof ( g_sCrashedBannerMySQL )-1 };
else if ( tQuery.m_eType==QUERY_JSON )
dBanner = { g_sCrashedBannerHTTP, sizeof ( g_sCrashedBannerHTTP )-1 };
if ( !bValidQuery )
dBanner = { g_sCrashedBannerBad, sizeof ( g_sCrashedBannerBad )-1 };
sphWrite ( g_iLogFile, dBanner );
// query
if ( bValidQuery )
{
QueryCopyState_t tCopyState;
tCopyState.m_pDst = g_dCrashQueryBuff;
tCopyState.m_pDstEnd = g_dCrashQueryBuff + sizeof(g_dCrashQueryBuff);
tCopyState.m_pSrc = tQuery.m_dQuery.first;
tCopyState.m_pSrcEnd = tQuery.m_dQuery.first + tQuery.m_dQuery.second;
CopyQuery_fn * pfnCopy = NULL;
if ( tQuery.m_eType==QUERY_API )
{
pfnCopy = &sphCopyEncodedBase64;
// the header should be a multiple of 3 bytes to seamlessly convert to BASE64
BYTE dHeader[] = {
(BYTE)( ( tQuery.m_uCMD>>8 ) & 0xff ),
(BYTE)( tQuery.m_uCMD & 0xff ),
(BYTE)( ( tQuery.m_uVer>>8 ) & 0xff ),
(BYTE)( tQuery.m_uVer & 0xff ),
(BYTE)( ( tQuery.m_dQuery.second>>24 ) & 0xff ),
(BYTE)( ( tQuery.m_dQuery.second>>16 ) & 0xff ),
(BYTE)( ( tQuery.m_dQuery.second>>8 ) & 0xff ),
(BYTE)( tQuery.m_dQuery.second & 0xff ),
*tQuery.m_dQuery.first
};
QueryCopyState_t tHeaderState;
tHeaderState.m_pDst = g_dCrashQueryBuff;
tHeaderState.m_pDstEnd = g_dCrashQueryBuff + sizeof(g_dCrashQueryBuff);
tHeaderState.m_pSrc = dHeader;
tHeaderState.m_pSrcEnd = dHeader + sizeof(dHeader);
pfnCopy ( tHeaderState );
assert ( tHeaderState.m_pSrc==tHeaderState.m_pSrcEnd );
tCopyState.m_pDst = tHeaderState.m_pDst;
tCopyState.m_pSrc++;
} else if ( tQuery.m_eType==QUERY_JSON )
{
pfnCopy = &sphCopySphinxHttp;
} else
{
pfnCopy = &sphCopySphinxQL;
}
while ( pfnCopy ( tCopyState ) )
{
sphWrite ( g_iLogFile, g_dCrashQueryBuff, tCopyState.m_pDst-g_dCrashQueryBuff );
tCopyState.m_pDst = g_dCrashQueryBuff; // reset the destination buffer
}
assert ( tCopyState.m_pSrc==tCopyState.m_pSrcEnd );
int iLeft = int ( tCopyState.m_pDst-g_dCrashQueryBuff );
if ( iLeft>0 )
sphWrite ( g_iLogFile, g_dCrashQueryBuff, iLeft );
}
// tail
sphWrite ( g_iLogFile, g_sCrashedBannerTail, sizeof(g_sCrashedBannerTail)-1 );
// index name
sphWrite ( g_iLogFile, g_sCrashedIndex, sizeof (g_sCrashedIndex)-1 );
if ( IsFilled ( tQuery.m_dIndex ) )
sphWrite ( g_iLogFile, tQuery.m_dIndex );
sphWrite ( g_iLogFile, g_sEndLine, sizeof (g_sEndLine)-1 );
sphSafeInfo ( g_iLogFile, g_sBannerVersion.cstr() );
#if _WIN32
// mini-dump reference
int iMiniDumpLen = snprintf ( (char *)g_dCrashQueryBuff, sizeof(g_dCrashQueryBuff),
"%s %s.%p.mdmp\n", g_sMinidumpBanner, g_sMinidump, tQuery.m_dQuery.first );
sphWrite ( g_iLogFile, g_dCrashQueryBuff, iMiniDumpLen );
snprintf ( (char *)g_dCrashQueryBuff, sizeof(g_dCrashQueryBuff), "%s.%p.mdmp",
g_sMinidump, tQuery.m_dQuery.first );
#endif
// log trace
#if !_WIN32
sphSafeInfo ( g_iLogFile, "Handling signal %d", sig );
// print message to stdout during daemon start
if ( g_bLogStdout && g_iLogFile!=STDOUT_FILENO )
sphSafeInfo ( STDOUT_FILENO, "Crash!!! Handling signal %d", sig );
sphBacktrace ( g_iLogFile, g_bSafeTrace );
#else
sphBacktrace ( pExc, (char *)g_dCrashQueryBuff );
#endif
// threads table
sphSafeInfo ( g_iLogFile, "--- active threads ---" );
int iThd = 0;
int iAllThd = 0;
Threads::IterateActive ( [&iThd,&iAllThd] ( Threads::LowThreadDesc_t * pThread )
{
if ( pThread )
{
auto pSrc = (ClientTaskInfo_t *) pThread->m_pTaskInfo.load ( std::memory_order_relaxed );
if ( pSrc ) ++iAllThd;
for ( ; pSrc; pSrc = (ClientTaskInfo_t *) pSrc->m_pPrev.load ( std::memory_order_relaxed ) )
if ( pSrc->m_eType==ClientTaskInfo_t::Task() )
{
sphSafeInfo ( g_iLogFile, "thd %d (%s), proto %s, state %s, command %s", iThd,
pThread->m_sThreadName.cstr(),
ProtoName (pSrc->GetProto()), TaskStateName ( pSrc->GetTaskState() ),
pSrc->m_szCommand ? pSrc->m_szCommand : "-" );
++iThd;
break;
}
}
} );
sphSafeInfo ( g_iLogFile, "--- Totally %d threads, and %d client-working threads ---", iAllThd, iThd );
// memory info
#if SPH_ALLOCS_PROFILER
sphWrite ( g_iLogFile, g_sMemoryStatBanner, sizeof ( g_sMemoryStatBanner )-1 );
sphMemStatDump ( g_iLogFile );
#endif
sphSafeInfo ( g_iLogFile, "------- CRASH DUMP END -------" );
if ( g_bCoreDump )
{
CRASH_EXIT_CORE;
} else
{
CRASH_EXIT;
}
}
void CrashLogger::SetupTimePID ()
{
char sTimeBuf[SPH_TIME_PID_MAX_SIZE];
sphFormatCurrentTime ( sTimeBuf, sizeof(sTimeBuf) );
g_iCrashInfoLen = snprintf ( g_sCrashInfo, SPH_TIME_PID_MAX_SIZE-1,
"------- FATAL: CRASH DUMP -------\n[%s] [%5d]\n", sTimeBuf, (int)getpid() );
}
#if _WIN32
void SetSignalHandlers ( bool )
{
sphBacktraceInit ();
snprintf ( g_sMinidump, SPH_TIME_PID_MAX_SIZE-1, "%s.%d", g_sPidFile.scstr(), (int)getpid() );
SetUnhandledExceptionFilter ( CrashLogger::HandleCrash );
}
#else
void SetSignalHandlers ( bool bAllowCtrlC=false ) REQUIRES ( MainThread )
{
sphBacktraceInit ();
struct sigaction sa;
sigfillset ( &sa.sa_mask );
sa.sa_flags = SA_NOCLDSTOP;
bool bSignalsSet = false;
auto dFatalOnFail = AtScopeExit( [ &bSignalsSet ]
{
if ( !bSignalsSet )
sphFatal( "sigaction(): %s", strerrorm(errno));
} );
sa.sa_handler = sigterm; if ( sigaction ( SIGTERM, &sa, NULL )!=0 ) return;
if ( !bAllowCtrlC )
{
sa.sa_handler = sigterm;
if ( sigaction ( SIGINT, &sa, NULL )!=0 )
return;
}
sa.sa_handler = sighup; if ( sigaction ( SIGHUP, &sa, NULL )!=0 ) return;
sa.sa_handler = sigusr1; if ( sigaction ( SIGUSR1, &sa, NULL )!=0 ) return;
sa.sa_handler = sigusr2; if ( sigaction ( SIGUSR2, &sa, NULL )!=0 ) return;
sa.sa_handler = SIG_IGN; if ( sigaction ( SIGPIPE, &sa, NULL )!=0 ) return;
sa.sa_flags |= SA_RESETHAND;
static CSphVector<BYTE> exception_handler_stack ( Max ( SIGSTKSZ, 65536 ) );
stack_t ss;
ss.ss_sp = exception_handler_stack.begin();
ss.ss_flags = 0;
ss.ss_size = exception_handler_stack.GetLength();
sigaltstack( &ss, 0 );
sa.sa_flags |= SA_ONSTACK;
sa.sa_handler = CrashLogger::HandleCrash; if ( sigaction ( SIGSEGV, &sa, NULL )!=0 ) return;
sa.sa_handler = CrashLogger::HandleCrash; if ( sigaction ( SIGBUS, &sa, NULL )!=0 ) return;
sa.sa_handler = CrashLogger::HandleCrash; if ( sigaction ( SIGABRT, &sa, NULL )!=0 ) return;
sa.sa_handler = CrashLogger::HandleCrash; if ( sigaction ( SIGILL, &sa, NULL )!=0 ) return;
sa.sa_handler = CrashLogger::HandleCrash; if ( sigaction ( SIGFPE, &sa, NULL )!=0 ) return;
bSignalsSet = true;
}
#endif
#if !_WIN32
int sphCreateUnixSocket ( const char * sPath ) REQUIRES ( MainThread )
{
static struct sockaddr_un uaddr;
size_t len = strlen ( sPath );
if ( len + 1 > sizeof( uaddr.sun_path ) )
sphFatal ( "UNIX socket path is too long (len=%d)", (int)len );
sphInfo ( "listening on UNIX socket %s", sPath );
memset ( &uaddr, 0, sizeof(uaddr) );
uaddr.sun_family = AF_UNIX;
memcpy ( uaddr.sun_path, sPath, len + 1 );
int iSock = socket ( AF_UNIX, SOCK_STREAM, 0 );
if ( iSock==-1 )
sphFatal ( "failed to create UNIX socket: %s", sphSockError() );
if ( unlink ( sPath )==-1 )
{
if ( errno!=ENOENT )
sphFatal ( "unlink() on UNIX socket file failed: %s", sphSockError() );
}
int iMask = umask ( 0 );
if ( bind ( iSock, (struct sockaddr *)&uaddr, sizeof(uaddr) )!=0 )
sphFatal ( "bind() on UNIX socket failed: %s", sphSockError() );
umask ( iMask );
return iSock;
}
#endif // !_WIN32
int sphCreateInetSocket ( const ListenerDesc_t & tDesc ) REQUIRES ( MainThread )
{
auto uAddr = tDesc.m_uIP;
auto iPort = tDesc.m_iPort;
char sAddress[SPH_ADDRESS_SIZE];
sphFormatIP ( sAddress, SPH_ADDRESS_SIZE, uAddr );
const char * sVip = tDesc.m_bVIP ? "VIP " : "";
const char * sRO = tDesc.m_bReadOnly ? "RO " : "";
if ( uAddr==htonl ( INADDR_ANY ) )
sphInfo ( "listening on all interfaces for %s%s%s, port=%d", sVip, sRO, RelaxedProtoName ( tDesc.m_eProto), iPort );
else
sphInfo ( "listening on %s:%d for %s%s%s", sAddress, iPort, sVip, sRO, RelaxedProtoName ( tDesc.m_eProto ) );
static struct sockaddr_in iaddr;
memset ( &iaddr, 0, sizeof(iaddr) );
iaddr.sin_family = AF_INET;
iaddr.sin_addr.s_addr = uAddr;
iaddr.sin_port = htons ( (short)iPort );
int iSock = socket ( AF_INET, SOCK_STREAM, 0 );
if ( iSock==-1 )
sphFatal ( "failed to create TCP socket: %s", sphSockError() );
sphSetSockReuseAddr ( iSock );
sphSetSockReusePort ( iSock );
sphSetSockNodelay ( iSock );
int iTries = 12;
int iRes;
do
{
iRes = bind ( iSock, (struct sockaddr *)&iaddr, sizeof(iaddr) );
if ( iRes==0 )
break;
sphInfo ( "bind() failed on %s, retrying...", sAddress );
sphLogDebug ( "bind() failed on %s:%d, error: %s", sAddress, iPort, sphSockError() );
sphSleepMsec ( 3000 );
} while ( --iTries>0 );
if ( iRes )
sphFatal ( "bind() failed on %s: %s", sAddress, sphSockError() );
return iSock;
}
ListenerDesc_t MakeAnyListener ( int iPort, Proto_e eProto=Proto_e::SPHINX )
{
ListenerDesc_t tDesc;
tDesc.m_eProto = eProto;
tDesc.m_uIP = htonl ( INADDR_ANY );
tDesc.m_iPort = iPort;
tDesc.m_iPortsCount = 0;
tDesc.m_bVIP = false;
tDesc.m_bReadOnly = false;
return tDesc;
}
ListenerDesc_t MakeLocalhostListener ( int iPort, Proto_e eProto )
{
ListenerDesc_t tDesc;
tDesc.m_eProto = eProto;
tDesc.m_uIP = htonl ( INADDR_LOOPBACK );
tDesc.m_iPort = iPort;
tDesc.m_iPortsCount = 0;
tDesc.m_bVIP = false;
tDesc.m_bReadOnly = false;
return tDesc;
}
// add any listener we will serve on our own (i.e. NOT galera's, since that is not our concern)
bool AddGlobalListener ( const ListenerDesc_t& tDesc ) REQUIRES ( MainThread )
{
if ( tDesc.m_eProto==Proto_e::REPLICATION )
return false;
Listener_t tListener;
tListener.m_eProto = tDesc.m_eProto;
tListener.m_bTcp = true;
tListener.m_bVIP = tDesc.m_bVIP;
tListener.m_bReadOnly = tDesc.m_bReadOnly;
#if !_WIN32
if ( !tDesc.m_sUnix.IsEmpty () )
{
tListener.m_iSock = sphCreateUnixSocket ( tDesc.m_sUnix.cstr () );
tListener.m_bTcp = false;
} else
#endif
tListener.m_iSock = sphCreateInetSocket ( tDesc );
g_dListeners.Add ( tListener );
return true;
}
struct ListenerPortRange_t
{
DWORD m_uIP { 0 };
int m_iPort { 0 };
int m_iCount { 0 };
static inline bool IsLess ( const ListenerPortRange_t & tA, const ListenerPortRange_t & tB )
{
if ( tA.m_uIP==tB.m_uIP )
return ( tA.m_iPort<tB.m_iPort );
return ( tA.m_uIP<tB.m_uIP );
}
CSphString Dump () const
{
char sAddress[SPH_ADDRESS_SIZE];
sphFormatIP ( sAddress, SPH_ADDRESS_SIZE, m_uIP );
CSphString sRes;
if ( m_iCount )
sRes.SetSprintf ( "%s:%d-%d", sAddress, m_iPort, ( m_iPort+m_iCount-1 ) );
else
sRes.SetSprintf ( "%s:%d", sAddress, m_iPort );
return sRes;
}
};
static bool ValidateListenerRanges ( const VecTraits_T<ListenerDesc_t> & dListeners, CSphString & sError )
{
CSphVector<ListenerPortRange_t> dPorts;
for ( const ListenerDesc_t & tDesc : dListeners )
{
if ( !tDesc.m_sUnix.IsEmpty() )
continue;
ListenerPortRange_t & tPort = dPorts.Add();
tPort.m_uIP = tDesc.m_uIP;
tPort.m_iPort = tDesc.m_iPort;
tPort.m_iCount = tDesc.m_iPortsCount;
}
dPorts.Sort ( ListenerPortRange_t() );
for ( int i=1; i<dPorts.GetLength(); i++ )
{
const ListenerPortRange_t & tPrev = dPorts[i-1];
const ListenerPortRange_t & tCur = dPorts[i];
if ( tPrev.m_uIP!=tCur.m_uIP )
continue;
if ( ( !tPrev.m_iCount && tPrev.m_iPort<tCur.m_iPort ) || ( tPrev.m_iCount && tPrev.m_iPort+tPrev.m_iCount-1<tCur.m_iPort ) )
continue;
sError.SetSprintf ( "invalid listener ports intersection %s -> %s", tPrev.Dump().cstr(), tCur.Dump().cstr() );
return false;
}
return true;
}
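// A minimal sketch of the overlap rule checked above: on the same IP, a range
// [port, port+count-1] must end strictly before the next listener's port begins.
// Port values here are hypothetical (illustrative only):
static void ListenerOverlapDemo()
{
	CSphVector<ListenerDesc_t> dDescs;
	ListenerDesc_t & tA = dDescs.Add();
	tA.m_uIP = htonl ( INADDR_ANY );
	tA.m_iPort = 9312;
	tA.m_iPortsCount = 4; // covers ports 9312..9315
	ListenerDesc_t & tB = dDescs.Add();
	tB.m_uIP = htonl ( INADDR_ANY );
	tB.m_iPort = 9315; // intersects the range above
	tB.m_iPortsCount = 0;
	CSphString sError;
	bool bOk = ValidateListenerRanges ( dDescs, sError );
	(void) bOk; // bOk==false here; sError describes the 9312-9315 vs 9315 intersection
}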
/////////////////////////////////////////////////////////////////////////////
// unpack Mysql Length-coded number
static int MysqlUnpack ( InputBuffer_c & tReq, DWORD * pSize )
{
assert ( pSize );
int iRes = tReq.GetByte();
--*pSize;
if ( iRes < 251 )
return iRes;
if ( iRes==0xFC )
{
*pSize -=2;
return tReq.GetByte() + ((int)tReq.GetByte()<<8);
}
if ( iRes==0xFD )
{
*pSize -= 3;
return tReq.GetByte() + ((int)tReq.GetByte()<<8) + ((int)tReq.GetByte()<<16);
}
if ( iRes==0xFE )
iRes = tReq.GetByte() + ((int)tReq.GetByte()<<8) + ((int)tReq.GetByte()<<16) + ((int)tReq.GetByte()<<24);
tReq.GetByte();
tReq.GetByte();
tReq.GetByte();
tReq.GetByte();
*pSize -= 8;
return iRes;
}
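// The MySQL length-coded integer format decoded above: a first byte below 251 is the
// value itself, while 0xFC, 0xFD and 0xFE prefix little-endian 2-, 3- and 8-byte
// integers. A minimal standalone decoder over a plain byte pointer; it deliberately
// skips the 0xFB NULL marker (illustrative only):
#include <cstdint>
static uint64_t DecodeMysqlLcn ( const unsigned char *& pBuf )
{
	unsigned char uFirst = *pBuf++;
	if ( uFirst<251 )
		return uFirst; // one-byte literal value
	uint64_t uRes = 0;
	int iBytes = ( uFirst==0xFC ) ? 2 : ( uFirst==0xFD ) ? 3 : 8; // 0xFE -> 8 bytes
	for ( int i=0; i<iBytes; ++i )
		uRes |= uint64_t(*pBuf++) << ( 8*i ); // little-endian accumulation
	return uRes;
}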
/////////////////////////////////////////////////////////////////////////////
void ISphOutputBuffer::SendBytes ( const void * pBuf, int iLen )
{
m_dBuf.Append ( pBuf, iLen );
}
void ISphOutputBuffer::SendBytes ( const char * pBuf )
{
if ( !pBuf )
return;
SendBytes ( pBuf, (int) strlen ( pBuf ) );
}
void ISphOutputBuffer::SendBytes ( const CSphString& sStr )
{
SendBytes ( sStr.cstr(), sStr.Length() );
}
void ISphOutputBuffer::SendBytes ( const Str_t& sStr )
{
m_dBuf.Append ( sStr );
}
void ISphOutputBuffer::SendBytes ( const VecTraits_T<BYTE> & dBuf )
{
m_dBuf.Append ( dBuf );
}
void ISphOutputBuffer::SendBytes ( const StringBuilder_c &dBuf )
{
SendBytes ( dBuf.begin(), dBuf.GetLength () );
}
void ISphOutputBuffer::SendBytes ( ByteBlob_t dData )
{
SendBytes ( dData.first, dData.second );
}
void ISphOutputBuffer::SendArray ( const ISphOutputBuffer &tOut )
{
int iLen = tOut.m_dBuf.GetLength();
SendInt ( iLen );
SendBytes ( tOut.m_dBuf.Begin(), iLen );
}
void ISphOutputBuffer::SendArray ( const VecTraits_T<BYTE> &dBuf, int iElems )
{
if ( iElems==-1 )
{
SendInt ( dBuf.GetLength () );
SendBytes ( dBuf );
return;
}
assert ( dBuf.GetLength() == (int) dBuf.GetLengthBytes() );
assert ( iElems<=dBuf.GetLength ());
SendInt ( iElems );
SendBytes ( dBuf.begin(), iElems );
}
void ISphOutputBuffer::SendArray ( const void * pBuf, int iLen )
{
if ( !pBuf )
iLen=0;
assert ( iLen>=0 );
SendInt ( iLen );
SendBytes ( pBuf, iLen );
}
void ISphOutputBuffer::SendArray ( const StringBuilder_c &dBuf )
{
SendArray ( dBuf.begin(), dBuf.GetLength () );
}
void ISphOutputBuffer::SendArray ( ByteBlob_t dData )
{
SendArray ( dData.first, dData.second );
}
void SendErrorReply ( ISphOutputBuffer & tOut, const char * sTemplate, ... )
{
CSphString sError;
va_list ap;
va_start ( ap, sTemplate );
sError.SetSprintfVa ( sTemplate, ap );
va_end ( ap );
auto tHdr = APIHeader ( tOut, SEARCHD_ERROR );
tOut.SendString ( sError.cstr() );
// --console logging
if ( g_bOptNoDetach && g_eLogFormat!=LOG_FORMAT_SPHINXQL )
sphInfo ( "query error: %s", sError.cstr() );
}
void DistributedIndex_t::GetAllHosts ( VectorAgentConn_t &dTarget ) const
{
for ( const auto& pMultiAgent : m_dAgents )
for ( const auto & dHost : *pMultiAgent )
{
auto * pAgent = new AgentConn_t;
pAgent->m_tDesc.CloneFrom ( dHost );
pAgent->m_iMyQueryTimeoutMs = GetAgentConnectTimeoutMs();
pAgent->m_iMyConnectTimeoutMs = GetAgentQueryTimeoutMs();
dTarget.Add ( pAgent );
}
}
DistributedIndex_t::~DistributedIndex_t ()
{
sphLogDebugv ( "DistributedIndex_t %p removed", this );
// cleanup global
m_dAgents.Reset();
MultiAgentDesc_c::CleanupOrphaned ();
};
int DistributedIndex_t::GetAgentConnectTimeoutMs ( bool bRaw ) const
{
return ( ( m_iAgentConnectTimeoutMs || bRaw ) ? m_iAgentConnectTimeoutMs : g_iAgentConnectTimeoutMs );
}
int DistributedIndex_t::GetAgentQueryTimeoutMs ( bool bRaw ) const
{
return ( ( m_iAgentQueryTimeoutMs || bRaw ) ? m_iAgentQueryTimeoutMs : g_iAgentQueryTimeoutMs );
}
void DistributedIndex_t::SetAgentConnectTimeoutMs ( int iAgentConnectTimeoutMs )
{
m_iAgentConnectTimeoutMs = iAgentConnectTimeoutMs;
}
void DistributedIndex_t::SetAgentQueryTimeoutMs ( int iAgentQueryTimeoutMs )
{
m_iAgentQueryTimeoutMs = iAgentQueryTimeoutMs;
}
DistributedIndex_t * DistributedIndex_t::Clone() const
{
DistributedIndex_t * pDist ( new DistributedIndex_t );
pDist->m_dAgents = m_dAgents;
pDist->m_dLocal = m_dLocal;
pDist->m_iAgentRetryCount = m_iAgentRetryCount;
pDist->m_bDivideRemoteRanges = m_bDivideRemoteRanges;
pDist->m_eHaStrategy = m_eHaStrategy;
pDist->m_sCluster = m_sCluster;
pDist->m_iAgentConnectTimeoutMs = m_iAgentConnectTimeoutMs;
pDist->m_iAgentQueryTimeoutMs = m_iAgentQueryTimeoutMs;
return pDist;
}
/////////////////////////////////////////////////////////////////////////////
// SEARCH HANDLER
/////////////////////////////////////////////////////////////////////////////
class SearchRequestBuilder_c : public RequestBuilder_i
{
public:
SearchRequestBuilder_c ( const VecTraits_T<CSphQuery> & dQueries, int iDivideLimits )
: m_dQueries ( dQueries ), m_iDivideLimits ( iDivideLimits )
{}
void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final;
protected:
void SendQuery ( const char * sIndexes, ISphOutputBuffer & tOut, const CSphQuery & q, int iWeight ) const;
protected:
const VecTraits_T<CSphQuery> & m_dQueries;
const int m_iDivideLimits;
};
class SearchReplyParser_c : public ReplyParser_i, public ISphNoncopyable
{
public:
explicit SearchReplyParser_c ( int iResults )
: m_iResults ( iResults )
{}
bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & tAgent ) const final;
private:
int m_iResults;
static void ParseSchema ( OneResultset_t & tRes, MemInputBuffer_c & tReq );
static void ParseMatch ( CSphMatch & tMatch, MemInputBuffer_c & tReq, const CSphSchema & tSchema, bool bAgent64 );
};
/////////////////////////////////////////////////////////////////////////////
/// qflag means Query Flag
/// names are internal to searchd and may be changed for clarity
/// values are communicated over network between searchds and APIs and MUST NOT CHANGE
enum
{
QFLAG_REVERSE_SCAN = 1UL << 0, // deprecated
QFLAG_SORT_KBUFFER = 1UL << 1,
QFLAG_MAX_PREDICTED_TIME = 1UL << 2,
QFLAG_SIMPLIFY = 1UL << 3,
QFLAG_PLAIN_IDF = 1UL << 4,
QFLAG_GLOBAL_IDF = 1UL << 5,
QFLAG_NORMALIZED_TF = 1UL << 6,
QFLAG_LOCAL_DF = 1UL << 7,
QFLAG_LOW_PRIORITY = 1UL << 8,
QFLAG_FACET = 1UL << 9,
QFLAG_FACET_HEAD = 1UL << 10,
QFLAG_JSON_QUERY = 1UL << 11,
QFLAG_NOT_ONLY_ALLOWED = 1UL << 12,
QFLAG_LOCAL_DF_SET = 1UL << 13
};
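// Flags pack into a single DWORD with the "flag * bool" idiom used in SendQuery()
// below; decoding is a plain mask test. A minimal round-trip sketch (illustrative only):
static void QflagRoundTripDemo()
{
	bool bFacet = true, bLowPriority = false;
	DWORD uFlags = 0;
	uFlags |= QFLAG_FACET * bFacet;               // multiplying by a bool sets or clears the bit
	uFlags |= QFLAG_LOW_PRIORITY * bLowPriority;
	bool bFacetBack = ( uFlags & QFLAG_FACET )!=0;
	bool bLowBack = ( uFlags & QFLAG_LOW_PRIORITY )!=0;
	(void) bFacetBack; // true
	(void) bLowBack;   // false
}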
void operator<< ( ISphOutputBuffer & tOut, const CSphNamedInt & tValue )
{
tOut.SendString ( tValue.first.cstr () );
tOut.SendInt ( tValue.second );
}
void operator>> ( InputBuffer_c & dIn, CSphNamedInt & tValue )
{
tValue.first = dIn.GetString ();
tValue.second = dIn.GetInt ();
}
void SearchRequestBuilder_c::SendQuery ( const char * sIndexes, ISphOutputBuffer & tOut, const CSphQuery & q, int iWeight ) const
{
bool bAgentWeight = ( iWeight!=-1 );
// starting with command version 1.27, flags go first
// reason being, i might add flags that affect *any* of the subsequent data (eg. qflag_pack_ints)
DWORD uFlags = 0;
uFlags |= QFLAG_SORT_KBUFFER * q.m_bSortKbuffer;
uFlags |= QFLAG_MAX_PREDICTED_TIME * ( q.m_iMaxPredictedMsec > 0 );
uFlags |= QFLAG_SIMPLIFY * q.m_bSimplify;
uFlags |= QFLAG_PLAIN_IDF * q.m_bPlainIDF;
uFlags |= QFLAG_GLOBAL_IDF * q.m_bGlobalIDF;
uFlags |= QFLAG_NORMALIZED_TF * q.m_bNormalizedTFIDF;
uFlags |= QFLAG_LOCAL_DF * q.m_bLocalDF.value_or ( false );
uFlags |= QFLAG_LOW_PRIORITY * q.m_bLowPriority;
uFlags |= QFLAG_FACET * q.m_bFacet;
uFlags |= QFLAG_FACET_HEAD * q.m_bFacetHead;
uFlags |= QFLAG_NOT_ONLY_ALLOWED * q.m_bNotOnlyAllowed;
uFlags |= QFLAG_LOCAL_DF_SET * q.m_bLocalDF.has_value();
if ( q.m_eQueryType==QUERY_JSON )
uFlags |= QFLAG_JSON_QUERY;
tOut.SendDword ( uFlags );
// The Search Legacy
tOut.SendInt ( 0 ); // offset is 0
if ( !q.m_bHasOuter )
{
if ( m_iDivideLimits==1 )
tOut.SendInt ( q.m_iMaxMatches ); // OPTIMIZE? normally, agent limit is max_matches, even if master limit is less
else // FIXME!!! that is broken with offset + limit
tOut.SendInt ( 1 + ( ( q.m_iOffset + q.m_iLimit )/m_iDivideLimits) );
} else
{
// with outer order by, inner limit must match between agent and master
tOut.SendInt ( q.m_iLimit );
}
tOut.SendInt ( (DWORD)q.m_eMode ); // match mode
tOut.SendInt ( (DWORD)q.m_eRanker ); // ranking mode
if ( q.m_eRanker==SPH_RANK_EXPR || q.m_eRanker==SPH_RANK_EXPORT )
tOut.SendString ( q.m_sRankerExpr.cstr() );
tOut.SendInt ( q.m_eSort ); // sort mode
tOut.SendString ( q.m_sSortBy.cstr() ); // sort attr
if ( q.m_eQueryType==QUERY_JSON )
tOut.SendString ( q.m_sQuery.cstr() );
else
{
if ( q.m_sRawQuery.IsEmpty() )
tOut.SendString ( q.m_sQuery.cstr() );
else
tOut.SendString ( q.m_sRawQuery.cstr() ); // query
}
tOut.SendInt ( q.m_dWeights.GetLength() );
ARRAY_FOREACH ( j, q.m_dWeights )
tOut.SendInt ( q.m_dWeights[j] ); // weights
tOut.SendString ( sIndexes ); // indexes
tOut.SendInt ( 1 ); // 64-bit id range flag
tOut.SendUint64 ( uint64_t(0) ); // default full id range (any client range must be in filters at this stage)
tOut.SendUint64 ( UINT64_MAX );
tOut.SendInt ( q.m_dFilters.GetLength() );
ARRAY_FOREACH ( j, q.m_dFilters )
{
const CSphFilterSettings & tFilter = q.m_dFilters[j];
tOut.SendString ( tFilter.m_sAttrName.cstr() );
tOut.SendInt ( tFilter.m_eType );
switch ( tFilter.m_eType )
{
case SPH_FILTER_VALUES:
tOut.SendInt ( tFilter.GetNumValues() );
for ( auto uValue : tFilter.GetValues () )
tOut.SendUint64 ( uValue );
break;
case SPH_FILTER_RANGE:
tOut.SendUint64 ( tFilter.m_iMinValue );
tOut.SendUint64 ( tFilter.m_iMaxValue );
break;
case SPH_FILTER_FLOATRANGE:
tOut.SendFloat ( tFilter.m_fMinValue );
tOut.SendFloat ( tFilter.m_fMaxValue );
break;
case SPH_FILTER_USERVAR:
case SPH_FILTER_STRING:
tOut.SendString ( tFilter.m_dStrings.GetLength()==1 ? tFilter.m_dStrings[0].cstr() : nullptr );
tOut.SendByte ( (BYTE)tFilter.m_eStrCmpDir );
break;
case SPH_FILTER_NULL:
tOut.SendByte ( tFilter.m_bIsNull );
break;
case SPH_FILTER_STRING_LIST:
tOut.SendInt ( tFilter.m_dStrings.GetLength() );
ARRAY_FOREACH ( iString, tFilter.m_dStrings )
tOut.SendString ( tFilter.m_dStrings[iString].cstr() );
break;
case SPH_FILTER_EXPRESSION: // need only name and type
break;
}
tOut.SendInt ( tFilter.m_bExclude );
tOut.SendInt ( tFilter.m_bHasEqualMin );
tOut.SendInt ( tFilter.m_bHasEqualMax );
tOut.SendInt ( tFilter.m_bOpenLeft );
tOut.SendInt ( tFilter.m_bOpenRight );
tOut.SendInt ( tFilter.m_eMvaFunc );
}
tOut.SendInt ( q.m_eGroupFunc );
tOut.SendString ( q.m_sGroupBy.cstr() );
if ( m_iDivideLimits==1 )
tOut.SendInt ( q.m_iMaxMatches );
else
tOut.SendInt ( 1+(q.m_iMaxMatches/m_iDivideLimits) ); // Reduce the max_matches also.
tOut.SendString ( q.m_sGroupSortBy.cstr() );
tOut.SendInt ( q.m_iCutoff );
tOut.SendInt ( q.m_iRetryCount<0 ? 0 : q.m_iRetryCount ); // workaround for old clients
tOut.SendInt ( q.m_iRetryDelay<0 ? 0 : q.m_iRetryDelay );
tOut.SendString ( q.m_sGroupDistinct.cstr() );
tOut.SendInt ( q.m_bGeoAnchor );
if ( q.m_bGeoAnchor )
{
tOut.SendString ( q.m_sGeoLatAttr.cstr() );
tOut.SendString ( q.m_sGeoLongAttr.cstr() );
tOut.SendFloat ( q.m_fGeoLatitude );
tOut.SendFloat ( q.m_fGeoLongitude );
}
if ( bAgentWeight )
{
tOut.SendInt ( 1 );
tOut.SendString ( "*" );
tOut.SendInt ( iWeight );
} else
{
tOut.SendInt ( q.m_dIndexWeights.GetLength() );
for ( const auto& dWeight : q.m_dIndexWeights )
tOut << dWeight;
}
tOut.SendDword ( q.m_uMaxQueryMsec );
tOut.SendInt ( q.m_dFieldWeights.GetLength() );
for ( const auto & dWeight : q.m_dFieldWeights )
tOut << dWeight;
tOut.SendString ( q.m_sComment.cstr() );
tOut.SendInt ( 0 ); // WAS: overrides
tOut.SendString ( q.m_sSelect.cstr() );
if ( q.m_iMaxPredictedMsec>0 )
tOut.SendInt ( q.m_iMaxPredictedMsec );
// emulate empty sub-select for agent (client ver 1.29) as master sends fixed outer offset+limits
tOut.SendString ( NULL );
tOut.SendInt ( 0 );
tOut.SendInt ( 0 );
tOut.SendInt ( q.m_bHasOuter );
// v.1.36
tOut.SendInt ( q.m_iExpansionLimit );
// master-agent extensions
tOut.SendDword ( q.m_eCollation ); // v.1
tOut.SendString ( q.m_sOuterOrderBy.cstr() ); // v.2
if ( q.m_bHasOuter )
tOut.SendInt ( q.m_iOuterOffset + q.m_iOuterLimit );
tOut.SendInt ( q.m_iGroupbyLimit );
tOut.SendString ( q.m_sUDRanker.cstr() );
tOut.SendString ( q.m_sUDRankerOpts.cstr() );
tOut.SendString ( q.m_sQueryTokenFilterLib.cstr() );
tOut.SendString ( q.m_sQueryTokenFilterName.cstr() );
tOut.SendString ( q.m_sQueryTokenFilterOpts.cstr() );
tOut.SendInt ( q.m_dFilterTree.GetLength() );
ARRAY_FOREACH ( i, q.m_dFilterTree )
{
tOut.SendInt ( q.m_dFilterTree[i].m_iLeft );
tOut.SendInt ( q.m_dFilterTree[i].m_iRight );
tOut.SendInt ( q.m_dFilterTree[i].m_iFilterItem );
tOut.SendInt ( q.m_dFilterTree[i].m_bOr );
}
tOut.SendInt( q.m_dItems.GetLength() );
ARRAY_FOREACH ( i, q.m_dItems )
{
const CSphQueryItem & tItem = q.m_dItems[i];
tOut.SendString ( tItem.m_sAlias.cstr() );
tOut.SendString ( tItem.m_sExpr.cstr() );
tOut.SendDword ( tItem.m_eAggrFunc );
}
tOut.SendInt( q.m_dRefItems.GetLength() );
ARRAY_FOREACH ( i, q.m_dRefItems )
{
const CSphQueryItem & tItem = q.m_dRefItems[i];
tOut.SendString ( tItem.m_sAlias.cstr() );
tOut.SendString ( tItem.m_sExpr.cstr() );
tOut.SendDword ( tItem.m_eAggrFunc );
}
tOut.SendDword ( q.m_eExpandKeywords );
tOut.SendInt ( q.m_dIndexHints.GetLength() );
for ( const auto & i : q.m_dIndexHints )
{
tOut.SendString ( i.m_sIndex.cstr() );
tOut.SendDword ( (DWORD)i.m_eType );
tOut.SendDword ( (DWORD)i.m_bForce );
}
tOut.SendInt ( (int)q.m_eJoinType );
tOut.SendString ( q.m_sJoinIdx.cstr() );
tOut.SendString ( q.m_sJoinQuery.cstr() );
tOut.SendInt ( q.m_dOnFilters.GetLength() );
for ( const auto & i : q.m_dOnFilters )
{
tOut.SendString ( i.m_sIdx1.cstr() );
tOut.SendString ( i.m_sAttr1.cstr() );
tOut.SendString ( i.m_sIdx2.cstr() );
tOut.SendString ( i.m_sAttr2.cstr() );
tOut.SendInt ( (int)i.m_eTypeCast1 );
tOut.SendInt ( (int)i.m_eTypeCast2 );
}
tOut.SendString ( q.m_sKNNAttr.cstr() );
if ( !q.m_sKNNAttr.IsEmpty() )
{
tOut.SendInt ( q.m_iKNNK );
tOut.SendInt ( q.m_iKnnEf );
tOut.SendInt ( q.m_dKNNVec.GetLength() );
for ( const auto & i : q.m_dKNNVec )
tOut.SendFloat(i);
}
tOut.SendInt ( (int)q.m_eJiebaMode );
}
void SearchRequestBuilder_c::BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const
{
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH ); // API header
tOut.SendInt ( VER_COMMAND_SEARCH_MASTER );
tOut.SendInt ( m_dQueries.GetLength() );
for ( auto& dQuery : m_dQueries )
SendQuery ( tAgent.m_tDesc.m_sIndexes.cstr (), tOut, dQuery, tAgent.m_iWeight );
}
struct cSearchResult : public iQueryResult
{
CSphVector<AggrResult_t> m_dResults;
void Reset () final
{
m_dResults.Reset();
}
bool HasWarnings () const final
{
return m_dResults.any_of ( [] ( const AggrResult_t &dRes ) { return !dRes.m_sWarning.IsEmpty (); } );
}
};
/////////////////////////////////////////////////////////////////////////////
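/// deserialize a single match from an agent reply according to the given schema;
/// variable-width values (MVAs, strings, json, float vectors) are repacked into local *_PTR blobs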
void SearchReplyParser_c::ParseMatch ( CSphMatch & tMatch, MemInputBuffer_c & tReq, const CSphSchema & tSchema, bool bAgent64 )
{
tMatch.Reset ( tSchema.GetRowSize() );
// WAS: docids
if ( bAgent64 )
tReq.GetUint64();
else
tReq.GetDword();
tMatch.m_iWeight = tReq.GetInt ();
for ( int i=0; i<tSchema.GetAttrsCount(); ++i )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
assert ( sphPlainAttrToPtrAttr(tAttr.m_eAttrType)==tAttr.m_eAttrType );
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET_PTR:
{
int iValues = tReq.GetDword ();
BYTE * pData = nullptr;
BYTE * pPacked = sphPackPtrAttr ( iValues*sizeof(DWORD), &pData );
tMatch.SetAttr ( tAttr.m_tLocator, (SphAttr_t)pPacked );
auto * pMVA = (DWORD *)pData;
if ( tAttr.m_eAttrType==SPH_ATTR_UINT32SET_PTR )
{
while ( iValues-- )
sphUnalignedWrite ( pMVA++, tReq.GetDword() );
} else
{
assert ( ( iValues%2 )==0 );
for ( ; iValues; iValues -= 2 )
{
uint64_t uMva = tReq.GetUint64();
sphUnalignedWrite ( pMVA, uMva );
pMVA += 2;
}
}
}
break;
case SPH_ATTR_FLOAT_VECTOR_PTR:
{
int iValues = tReq.GetDword ();
BYTE * pData = nullptr;
BYTE * pPacked = sphPackPtrAttr ( iValues*sizeof(DWORD), &pData );
tMatch.SetAttr ( tAttr.m_tLocator, (SphAttr_t)pPacked );
auto * pFloatVec = (float *)pData;
while ( iValues-- )
sphUnalignedWrite ( pFloatVec++, tReq.GetFloat() );
}
break;
case SPH_ATTR_STRINGPTR:
case SPH_ATTR_JSON_PTR:
case SPH_ATTR_FACTORS:
case SPH_ATTR_FACTORS_JSON:
{
int iLen = tReq.GetDword();
BYTE * pData = nullptr;
if (iLen)
{
tMatch.SetAttr ( tAttr.m_tLocator, (SphAttr_t)sphPackPtrAttr ( iLen, &pData ) );
tReq.GetBytes ( pData, iLen );
} else
tMatch.SetAttr ( tAttr.m_tLocator, (SphAttr_t) 0 );
}
break;
case SPH_ATTR_JSON_FIELD_PTR:
{
// FIXME: no reason for json_field to be any different from other *_PTR attributes
auto eJson = (ESphJsonType)tReq.GetByte();
if ( eJson==JSON_EOF )
tMatch.SetAttr ( tAttr.m_tLocator, 0 );
else
{
int iLen = tReq.GetDword();
BYTE * pData = nullptr;
tMatch.SetAttr ( tAttr.m_tLocator, (SphAttr_t)sphPackPtrAttr ( iLen+1, &pData ) );
*pData++ = (BYTE)eJson;
tReq.GetBytes ( pData, iLen );
}
}
break;
case SPH_ATTR_FLOAT:
tMatch.SetAttrFloat ( tAttr.m_tLocator, tReq.GetFloat() );
break;
case SPH_ATTR_DOUBLE:
tMatch.SetAttrDouble ( tAttr.m_tLocator, tReq.GetDouble() );
break;
case SPH_ATTR_BIGINT:
case SPH_ATTR_UINT64:
tMatch.SetAttr ( tAttr.m_tLocator, tReq.GetUint64() );
break;
default:
tMatch.SetAttr ( tAttr.m_tLocator, tReq.GetDword() );
break;
}
}
}
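/// deserialize a result set schema (fields, then attrs) from an agent reply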
void SearchReplyParser_c::ParseSchema ( OneResultset_t & tRes, MemInputBuffer_c & tReq )
{
CSphSchema & tSchema = tRes.m_tSchema;
tSchema.Reset ();
int nFields = tReq.GetInt(); // FIXME! add a sanity check
for ( int j = 0; j < nFields; ++j )
tSchema.AddField ( tReq.GetString().cstr() );
int iNumAttrs = tReq.GetInt(); // FIXME! add a sanity check
for ( int j=0; j<iNumAttrs; ++j )
{
CSphColumnInfo tCol;
tCol.m_sName = tReq.GetString ();
tCol.m_eAttrType = (ESphAttr) tReq.GetDword (); // FIXME! add a sanity check
// attrs travel in plain (not *_PTR) form over the wire; convert them to the *_PTR flavors used locally
tCol.m_eAttrType = sphPlainAttrToPtrAttr ( tCol.m_eAttrType );
if ( tCol.m_eAttrType==SPH_ATTR_STORED_FIELD )
{
tCol.m_eAttrType = SPH_ATTR_STRINGPTR;
tCol.m_uFieldFlags = CSphColumnInfo::FIELD_STORED;
}
tSchema.AddAttr ( tCol, true ); // all attributes received from agents are dynamic
}
}
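/// parse a complete agent search reply: per-result status, schema, matches,
/// totals, optional IO/CPU stats, and per-word statistics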
bool SearchReplyParser_c::ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & tAgent ) const
{
const int iResults = m_iResults;
assert ( iResults>0 );
if ( !tAgent.m_pResult )
tAgent.m_pResult = std::make_unique<cSearchResult>();
auto pResult = (cSearchResult*)tAgent.m_pResult.get();
auto &dResults = pResult->m_dResults;
dResults.Resize ( iResults );
for ( auto & tRes : dResults )
{
tRes.m_iSuccesses = 0;
OneResultset_t tChunk;
tChunk.m_iTag = tAgent.m_iStoreTag;
tChunk.m_bTag = true;
tChunk.m_pAgent = &tAgent;
tRes.m_sError = "";
tRes.m_sWarning = "";
// get status and message
auto eStatus = ( SearchdStatus_e ) tReq.GetDword ();
switch ( eStatus )
{
case SEARCHD_ERROR: tRes.m_sError = tReq.GetString (); continue;
case SEARCHD_RETRY: tRes.m_sError = tReq.GetString (); break;
case SEARCHD_WARNING: tRes.m_sWarning = tReq.GetString (); break;
default: tAgent.m_sFailure.SetSprintf ( "internal error: unknown status %d, message %s", eStatus, tReq.GetString ().cstr() );
case SEARCHD_OK: break;
}
ParseSchema ( tChunk, tReq );
// get matches
int iMatches = tReq.GetInt ();
if ( iMatches<0 )
{
tAgent.m_sFailure.SetSprintf ( "invalid match count received (count=%d)", iMatches );
return false;
}
bool bAgent64 = !!tReq.GetInt();
if ( !bAgent64 )
{
tAgent.m_sFailure.SetSprintf ( "agent has 32-bit docids; no longer supported" );
return false;
}
tChunk.m_dMatches.Resize ( iMatches );
for ( auto & tMatch : tChunk.m_dMatches )
ParseMatch ( tMatch, tReq, tChunk.m_tSchema, bAgent64 );
// read totals (retrieved count, total count, query time, word count)
int iRetrieved = tReq.GetInt ();
tRes.m_iTotalMatches = tReq.GetInt ();
tRes.m_bTotalMatchesApprox = !!tReq.GetInt();
tRes.m_iQueryTime = tReq.GetInt ();
// agents always send IO/CPU stats to master
BYTE uStatMask = tReq.GetByte();
if ( uStatMask & 1U )
{
tRes.m_tIOStats.m_iReadTime = tReq.GetUint64();
tRes.m_tIOStats.m_iReadOps = tReq.GetDword();
tRes.m_tIOStats.m_iReadBytes = tReq.GetUint64();
tRes.m_tIOStats.m_iWriteTime = tReq.GetUint64();
tRes.m_tIOStats.m_iWriteOps = tReq.GetDword();
tRes.m_tIOStats.m_iWriteBytes = tReq.GetUint64();
}
if ( uStatMask & 2U )
tRes.m_iCpuTime = tReq.GetUint64();
if ( uStatMask & 4U )
tRes.m_iPredictedTime = tReq.GetUint64();
tRes.m_iAgentFetchedDocs = tReq.GetDword();
tRes.m_iAgentFetchedHits = tReq.GetDword();
tRes.m_iAgentFetchedSkips = tReq.GetDword();
const int iWordsCount = tReq.GetInt (); // FIXME! sanity check?
if ( iRetrieved!=iMatches )
{
tAgent.m_sFailure.SetSprintf ( "expected %d retrieved documents, got %d", iMatches, iRetrieved );
return false;
}
// read per-word stats
for ( int i=0; i<iWordsCount; ++i )
{
const CSphString sWord = tReq.GetString ();
const int64_t iDocs = (unsigned int)tReq.GetInt ();
const int64_t iHits = (unsigned int)tReq.GetInt ();
tReq.GetByte(); // statistics have no expanded terms for now
tRes.AddStat ( sWord, iDocs, iHits );
}
// mark this result as ok
auto& tNewChunk = tRes.m_dResults.Add ();
::Swap ( tNewChunk, tChunk );
tRes.m_iSuccesses = 1;
}
// all seems OK (and buffer length checks are performed by caller)
return true;
}
/////////////////////////////////////////////////////////////////////////////
// returns true if incoming schema (src) is compatible with existing (dst); false otherwise
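// NB: tDst is modified in place: attrs missing from tSrc are dropped, and differing bit widths are widened to the larger size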
bool MinimizeSchema ( CSphSchema & tDst, const ISphSchema & tSrc )
{
// if dst is empty, result is also empty
if ( tDst.GetAttrsCount()==0 )
return tSrc.GetAttrsCount()==0;
// check for equality, and remove all dst attributes that are not present in src
CSphVector<CSphColumnInfo> dDst;
for ( int i = 0, iAttrsCount = tDst.GetAttrsCount (); i<iAttrsCount; ++i )
dDst.Add ( tDst.GetAttr(i) );
bool bEqual = ( tDst.GetAttrsCount()==tSrc.GetAttrsCount() );
ARRAY_FOREACH ( i, dDst )
{
auto& tDstAttr = dDst[i];
int iSrcIdx = tSrc.GetAttrIndex ( tDstAttr.m_sName.cstr() );
// check for index mismatch
if ( iSrcIdx!=i )
bEqual = false;
// check for type/size mismatch (and fixup if needed)
if ( iSrcIdx>=0 )
{
const CSphColumnInfo & tSrcAttr = tSrc.GetAttr ( iSrcIdx );
// should seamlessly convert ( bool > float ) | ( bool > int > bigint )
ESphAttr eDst = tDstAttr.m_eAttrType;
ESphAttr eSrc = tSrcAttr.m_eAttrType;
bool bSame = ( eDst==eSrc )
|| ( ( eDst==SPH_ATTR_FLOAT && eSrc==SPH_ATTR_BOOL ) || ( eDst==SPH_ATTR_BOOL && eSrc==SPH_ATTR_FLOAT ) )
|| ( ( eDst==SPH_ATTR_BOOL || eDst==SPH_ATTR_INTEGER || eDst==SPH_ATTR_BIGINT )
&& ( eSrc==SPH_ATTR_BOOL || eSrc==SPH_ATTR_INTEGER || eSrc==SPH_ATTR_BIGINT ) );
int iDstBitCount = tDstAttr.m_tLocator.m_iBitCount;
int iSrcBitCount = tSrcAttr.m_tLocator.m_iBitCount;
if ( !bSame )
{
// different types? remove the attr
iSrcIdx = -1;
bEqual = false;
} else if ( iDstBitCount!=iSrcBitCount )
{
// different bit sizes? choose the max one
tDstAttr.m_tLocator.m_iBitCount = Max ( iDstBitCount, iSrcBitCount );
bEqual = false;
if ( iDstBitCount<iSrcBitCount )
tDstAttr.m_eAttrType = tSrcAttr.m_eAttrType;
}
if ( tSrcAttr.m_tLocator.m_iBitOffset!=tDstAttr.m_tLocator.m_iBitOffset )
{
// different offsets? have to force target dynamic then, since we can't use one locator for all matches
bEqual = false;
}
if ( tSrcAttr.m_tLocator.m_bDynamic!=tDstAttr.m_tLocator.m_bDynamic )
{
// different location? have to force target dynamic then
bEqual = false;
}
}
// check for presence
if ( iSrcIdx<0 )
{
dDst.Remove ( i );
--i;
}
}
if ( !bEqual )
{
CSphVector<CSphColumnInfo> dFields { tDst.GetFieldsCount() };
for ( int i = 0, iFieldsCount = tDst.GetFieldsCount (); i<iFieldsCount; ++i )
dFields[i] = tDst.GetField(i);
tDst.Reset();
for ( auto& dAttr : dDst )
tDst.AddAttr ( dAttr, true );
for ( auto& dField: dFields )
tDst.AddField ( dField );
} else
tDst.SwapAttrs ( dDst );
return bEqual;
}
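// validate basic query values (modes, limits, retries); on failure sError gets a human-readable message, otherwise it stays empty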
static void CheckQuery ( const CSphQuery & tQuery, CSphString & sError, bool bCanLimitless=false )
{
#define LOC_ERROR( ... ) do { sError.SetSprintf (__VA_ARGS__); return; } while(0)
sError = nullptr;
if ( (int)tQuery.m_eMode<0 || tQuery.m_eMode>SPH_MATCH_TOTAL )
LOC_ERROR ( "invalid match mode %d", tQuery.m_eMode );
if ( (int)tQuery.m_eRanker<0 || tQuery.m_eRanker>SPH_RANK_TOTAL )
LOC_ERROR ( "invalid ranking mode %d", tQuery.m_eRanker );
if ( tQuery.m_iMaxMatches<1 )
LOC_ERROR ( "max_matches can not be less than one" );
if ( tQuery.m_iOffset<0 || tQuery.m_iOffset>=tQuery.m_iMaxMatches )
LOC_ERROR ( "offset out of bounds (offset=%d, max_matches=%d)", tQuery.m_iOffset, tQuery.m_iMaxMatches );
if ( tQuery.m_iLimit < ( bCanLimitless ? -1 : 0 ) ) // -1 is magic for 'limitless select'
LOC_ERROR ( "limit out of bounds (limit=%d)", tQuery.m_iLimit );
if ( tQuery.m_iCutoff<-1 )
LOC_ERROR ( "cutoff out of bounds (cutoff=%d)", tQuery.m_iCutoff );
if ( ( tQuery.m_iRetryCount!=-1 ) && ( tQuery.m_iRetryCount>DAEMON_MAX_RETRY_COUNT ) )
LOC_ERROR ( "retry count out of bounds (count=%d)", tQuery.m_iRetryCount );
if ( ( tQuery.m_iRetryDelay!=-1 ) && ( tQuery.m_iRetryDelay>DAEMON_MAX_RETRY_DELAY ) )
LOC_ERROR ( "retry delay out of bounds (delay=%d)", tQuery.m_iRetryDelay );
if ( tQuery.m_iOffset>0 && tQuery.m_bHasOuter )
LOC_ERROR ( "inner offset must be 0 when using outer order by (offset=%d)", tQuery.m_iOffset );
#undef LOC_ERROR
}
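// emulate legacy matching modes over the extended engine: escape special characters
// in the raw query, add quoting for ANY/PHRASE, and pick a suitable ranker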
void PrepareQueryEmulation ( CSphQuery * pQuery )
{
if ( pQuery->m_eMode==SPH_MATCH_BOOLEAN )
pQuery->m_eRanker = SPH_RANK_NONE;
if ( pQuery->m_eMode==SPH_MATCH_FULLSCAN )
pQuery->m_sQuery = "";
if ( pQuery->m_eMode!=SPH_MATCH_ALL && pQuery->m_eMode!=SPH_MATCH_ANY && pQuery->m_eMode!=SPH_MATCH_PHRASE )
return;
const char * szQuery = pQuery->m_sRawQuery.cstr ();
int iQueryLen = szQuery ? (int) strlen(szQuery) : 0;
pQuery->m_sQuery.Reserve ( iQueryLen*2+8 );
char * szRes = (char*) pQuery->m_sQuery.cstr ();
char c;
if ( pQuery->m_eMode==SPH_MATCH_ANY || pQuery->m_eMode==SPH_MATCH_PHRASE )
*szRes++ = '\"';
if ( iQueryLen )
{
while ( ( c = *szQuery++ )!=0 )
{
// must be in sync with EscapeString (php api)
const char sMagics[] = "<\\()|-!@~\"&/^$=";
for ( const char * s = sMagics; *s; s++ )
if ( c==*s )
{
*szRes++ = '\\';
break;
}
*szRes++ = c;
}
}
switch ( pQuery->m_eMode )
{
case SPH_MATCH_ALL: pQuery->m_eRanker = SPH_RANK_PROXIMITY; *szRes = '\0'; break;
case SPH_MATCH_ANY: pQuery->m_eRanker = SPH_RANK_MATCHANY; strncpy ( szRes, "\"/1", 8 ); break;
case SPH_MATCH_PHRASE: pQuery->m_eRanker = SPH_RANK_PROXIMITY; *szRes++ = '\"'; *szRes = '\0'; break;
default: return;
}
}
static void FixupQuerySettings ( CSphQuery & tQuery )
{
// sort filters
for ( auto & i : tQuery.m_dFilters )
i.m_dValues.Sort();
if ( !tQuery.m_bHasOuter )
{
tQuery.m_sOuterOrderBy = "";
tQuery.m_iOuterOffset = 0;
tQuery.m_iOuterLimit = 0;
}
}
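// deserialize a single filter from an API search request;
// trailing flags (equality, open ranges, MVA function) are gated by the master version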
static bool ParseSearchFilter ( CSphFilterSettings & tFilter, InputBuffer_c & tReq, ISphOutputBuffer & tOut, int iMasterVer, DWORD uVer )
{
tFilter.m_sAttrName = tReq.GetString ();
sphColumnToLowercase ( const_cast<char *>( tFilter.m_sAttrName.cstr() ) );
tFilter.m_eType = (ESphFilter) tReq.GetDword ();
switch ( tFilter.m_eType )
{
case SPH_FILTER_RANGE:
tFilter.m_iMinValue = tReq.GetUint64();
tFilter.m_iMaxValue = tReq.GetUint64();
break;
case SPH_FILTER_FLOATRANGE:
tFilter.m_fMinValue = tReq.GetFloat ();
tFilter.m_fMaxValue = tReq.GetFloat ();
break;
case SPH_FILTER_VALUES:
{
int iGot = 0;
bool bRes = tReq.GetQwords ( tFilter.m_dValues, iGot, g_iMaxFilterValues );
if ( !bRes )
{
SendErrorReply ( tOut, "invalid attribute '%s' set length %d (should be in 0..%d range)", tFilter.m_sAttrName.cstr(), iGot, g_iMaxFilterValues );
return false;
}
}
break;
case SPH_FILTER_USERVAR:
case SPH_FILTER_STRING:
tFilter.m_dStrings.Add ( tReq.GetString() );
if ( uVer>=0x126 )
tFilter.m_eStrCmpDir = (EStrCmpDir) tReq.GetByte();
break;
case SPH_FILTER_NULL:
tFilter.m_bIsNull = tReq.GetByte()!=0;
break;
case SPH_FILTER_STRING_LIST:
{
int iCount = tReq.GetDword();
if ( iCount<0 || iCount>g_iMaxFilterValues )
{
SendErrorReply ( tOut, "invalid attribute '%s' set length %d (should be in 0..%d range)", tFilter.m_sAttrName.cstr(), iCount, g_iMaxFilterValues );
return false;
}
tFilter.m_dStrings.Resize ( iCount );
ARRAY_FOREACH ( iString, tFilter.m_dStrings )
tFilter.m_dStrings[iString] = tReq.GetString();
}
break;
case SPH_FILTER_EXPRESSION: // need only name and type
break;
default:
SendErrorReply ( tOut, "unknown filter type (type-id=%d)", tFilter.m_eType );
return false;
}
if ( tFilter.m_sAttrName=="@id" )
{
// request coming from old master, need to fix attribute name
tFilter.m_sAttrName = "id";
// and clamp values from uint64_t to int64_t
if ( (uint64_t)tFilter.m_iMinValue > (uint64_t)LLONG_MAX )
tFilter.m_iMinValue = LLONG_MAX;
if ( (uint64_t)tFilter.m_iMaxValue > (uint64_t)LLONG_MAX )
tFilter.m_iMaxValue = LLONG_MAX;
}
tFilter.m_bExclude = !!tReq.GetDword ();
if ( iMasterVer>=15 )
{
tFilter.m_bHasEqualMin = !!tReq.GetDword();
tFilter.m_bHasEqualMax = !!tReq.GetDword();
} else if ( iMasterVer>=5 )
tFilter.m_bHasEqualMin = tFilter.m_bHasEqualMax = !!tReq.GetDword();
if ( iMasterVer>=15 )
{
tFilter.m_bOpenLeft = !!tReq.GetDword();
tFilter.m_bOpenRight = !!tReq.GetDword();
}
tFilter.m_eMvaFunc = SPH_MVAFUNC_ANY;
if ( iMasterVer>=13 )
tFilter.m_eMvaFunc = (ESphMvaFunc)tReq.GetDword();
return true;
}
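// ensure an explicit select list also fetches document ids; docids are needed later when sending result sets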
static void AddDocids ( CSphVector<CSphQueryItem> & dItems )
{
if ( !dItems.GetLength() )
return;
bool bHaveDocID = false;
for ( const auto & i : dItems )
bHaveDocID |= i.m_sAlias==sphGetDocidName() || i.m_sExpr=="*";
if ( !bHaveDocID )
{
CSphQueryItem tId;
tId.m_sExpr = tId.m_sAlias = sphGetDocidName();
dItems.Insert ( 0, tId );
}
}
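// parse a complete API search request into tQuery;
// field order and version gates must stay in sync with SearchRequestBuilder_c::SendQuery()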
bool ParseSearchQuery ( InputBuffer_c & tReq, ISphOutputBuffer & tOut, CSphQuery & tQuery, WORD uVer, WORD uMasterVer )
{
// daemon-level defaults
tQuery.m_iRetryCount = DEFAULT_QUERY_RETRY;
tQuery.m_iRetryDelay = DEFAULT_QUERY_RETRY;
tQuery.m_iAgentQueryTimeoutMs = DEFAULT_QUERY_TIMEOUT;
// v.1.27+ flags come first
DWORD uFlags = 0;
if ( uVer>=0x11B )
uFlags = tReq.GetDword();
// v.1.0. mode, limits, weights, ID/TS ranges
tQuery.m_iOffset = tReq.GetInt ();
tQuery.m_iLimit = tReq.GetInt ();
tQuery.m_eMode = (ESphMatchMode) tReq.GetInt ();
tQuery.m_eRanker = (ESphRankMode) tReq.GetInt ();
if ( tQuery.m_eRanker==SPH_RANK_EXPR || tQuery.m_eRanker==SPH_RANK_EXPORT )
tQuery.m_sRankerExpr = tReq.GetString();
tQuery.m_eSort = (ESphSortOrder) tReq.GetInt ();
tQuery.m_sSortBy = tReq.GetString ();
// here we once and for all replace SPH_SORT_ATTR_ASC with SPH_SORT_EXTENDED
if ( tQuery.m_eSort == SPH_SORT_ATTR_ASC )
{
tQuery.m_sSortBy = SphSprintf ( "%s ASC", tQuery.m_sSortBy.cstr() );
tQuery.m_eSort = SPH_SORT_EXTENDED;
}
// here we once and for all replace SPH_SORT_ATTR_DESC with SPH_SORT_EXTENDED
if ( tQuery.m_eSort == SPH_SORT_ATTR_DESC )
{
tQuery.m_sSortBy = SphSprintf ( "%s DESC", tQuery.m_sSortBy.cstr() );
tQuery.m_eSort = SPH_SORT_EXTENDED;
}
sphColumnToLowercase ( const_cast<char *>( tQuery.m_sSortBy.cstr() ) );
tQuery.m_sRawQuery = tReq.GetString ();
{
int iGot = 0;
if ( !tReq.GetDwords ( tQuery.m_dWeights, iGot, SPH_MAX_FIELDS ) )
{
SendErrorReply ( tOut, "invalid weight count %d (should be in 0..%d range)", iGot, SPH_MAX_FIELDS );
return false;
}
}
tQuery.m_sIndexes = tReq.GetString ();
// legacy id range filter
bool bIdrange64 = tReq.GetInt()!=0;
DocID_t tMinDocID = bIdrange64 ? (DocID_t)tReq.GetUint64 () : tReq.GetDword ();
DocID_t tMaxDocID = bIdrange64 ? (DocID_t)tReq.GetUint64 () : tReq.GetDword ();
if ( tMaxDocID==0 || (uint64_t)tMaxDocID==UINT64_MAX )
tMaxDocID = INT64_MAX;
int iAttrFilters = tReq.GetInt ();
if ( iAttrFilters>g_iMaxFilters )
{
SendErrorReply ( tOut, "too many attribute filters (req=%d, max=%d)", iAttrFilters, g_iMaxFilters );
return false;
}
tQuery.m_dFilters.Resize ( iAttrFilters );
for ( auto & i : tQuery.m_dFilters )
if ( !ParseSearchFilter ( i, tReq, tOut, uMasterVer, uVer ) )
return false;
// now add id range filter
if ( tMinDocID!=0 || tMaxDocID!=INT64_MAX )
{
CSphFilterSettings & tFilter = tQuery.m_dFilters.Add();
tFilter.m_sAttrName = sphGetDocidName();
tFilter.m_eType = SPH_FILTER_RANGE;
tFilter.m_iMinValue = tMinDocID;
tFilter.m_iMaxValue = tMaxDocID;
}
tQuery.m_eGroupFunc = (ESphGroupBy) tReq.GetDword ();
tQuery.m_sGroupBy = tReq.GetString ();
sphColumnToLowercase ( const_cast<char *>( tQuery.m_sGroupBy.cstr() ) );
tQuery.m_iMaxMatches = tReq.GetInt ();
tQuery.m_bExplicitMaxMatches = tQuery.m_iMaxMatches!=DEFAULT_MAX_MATCHES; // fixme?
tQuery.m_sGroupSortBy = tReq.GetString ();
tQuery.m_iCutoff = tReq.GetInt();
tQuery.m_iRetryCount = tReq.GetInt ();
tQuery.m_iRetryDelay = tReq.GetInt ();
tQuery.m_sGroupDistinct = tReq.GetString ();
sphColumnToLowercase ( const_cast<char *>( tQuery.m_sGroupDistinct.cstr() ) );
tQuery.m_bGeoAnchor = ( tReq.GetInt()!=0 );
if ( tQuery.m_bGeoAnchor )
{
tQuery.m_sGeoLatAttr = tReq.GetString ();
tQuery.m_sGeoLongAttr = tReq.GetString ();
tQuery.m_fGeoLatitude = tReq.GetFloat ();
tQuery.m_fGeoLongitude = tReq.GetFloat ();
}
tQuery.m_dIndexWeights.Resize ( tReq.GetInt() ); // FIXME! add sanity check
for ( auto& dIndexWeight : tQuery.m_dIndexWeights )
tReq >> dIndexWeight;
tQuery.m_uMaxQueryMsec = tReq.GetDword ();
tQuery.m_dFieldWeights.Resize ( tReq.GetInt() ); // FIXME! add sanity check
for ( auto & dFieldWeight : tQuery.m_dFieldWeights )
tReq >> dFieldWeight;
tQuery.m_sComment = tReq.GetString ();
int nOverrides = tReq.GetInt();
if ( nOverrides>0 )
{
SendErrorReply ( tOut, "overrides are now deprecated" );
return false;
}
tQuery.m_sSelect = tReq.GetString ();
tQuery.m_bAgent = ( uMasterVer>0 );
if ( tQuery.m_sSelect.Begins ( "*,*" ) ) // this is the legacy mark of an agent, kept for debug purposes
{
tQuery.m_bAgent = true;
int iSelectLen = tQuery.m_sSelect.Length();
tQuery.m_sSelect = ( iSelectLen>4 ? tQuery.m_sSelect.SubString ( 4, iSelectLen-4 ) : "*" );
}
// fixup select list
if ( tQuery.m_sSelect.IsEmpty () )
tQuery.m_sSelect = "*";
// master sends items to agents since master.version=15
CSphString sError;
if ( uMasterVer<15 && !ParseSelectList ( sError, tQuery ) )
{
// we want to see a parse error in query_log_format=sphinxql mode too
if ( g_eLogFormat==LOG_FORMAT_SPHINXQL && g_iQueryLogFile>=0 )
{
StringBuilder_c tBuf;
tBuf << "/* ";
sphFormatCurrentTime ( tBuf );
tBuf << "*/ " << tQuery.m_sSelect << " # error=" << sError << '\n';
sphSeek ( g_iQueryLogFile, 0, SEEK_END );
sphWrite ( g_iQueryLogFile, tBuf.cstr(), tBuf.GetLength() );
}
SendErrorReply ( tOut, "select: %s", sError.cstr () );
return false;
}
// v.1.27
if ( uVer>=0x11B )
{
// parse simple flags
tQuery.m_bSortKbuffer = !!( uFlags & QFLAG_SORT_KBUFFER );
tQuery.m_bSimplify = !!( uFlags & QFLAG_SIMPLIFY );
tQuery.m_bPlainIDF = !!( uFlags & QFLAG_PLAIN_IDF );
tQuery.m_bGlobalIDF = !!( uFlags & QFLAG_GLOBAL_IDF );
if ( uVer<0x125 || ( uVer>=0x125 && ( uFlags & QFLAG_LOCAL_DF_SET )==QFLAG_LOCAL_DF_SET ) )
tQuery.m_bLocalDF = !!( uFlags & QFLAG_LOCAL_DF );
tQuery.m_bLowPriority = !!( uFlags & QFLAG_LOW_PRIORITY );
tQuery.m_bFacet = !!( uFlags & QFLAG_FACET );
tQuery.m_bFacetHead = !!( uFlags & QFLAG_FACET_HEAD );
tQuery.m_eQueryType = (uFlags & QFLAG_JSON_QUERY) ? QUERY_JSON : QUERY_API;
tQuery.m_bNotOnlyAllowed = !!( uFlags & QFLAG_NOT_ONLY_ALLOWED );
if ( uMasterVer>0 || uVer==0x11E )
tQuery.m_bNormalizedTFIDF = !!( uFlags & QFLAG_NORMALIZED_TF );
// fetch optional stuff
if ( uFlags & QFLAG_MAX_PREDICTED_TIME )
tQuery.m_iMaxPredictedMsec = tReq.GetInt();
}
// v.1.29
if ( uVer>=0x11D )
{
tQuery.m_sOuterOrderBy = tReq.GetString();
tQuery.m_iOuterOffset = tReq.GetDword();
tQuery.m_iOuterLimit = tReq.GetDword();
tQuery.m_bHasOuter = ( tReq.GetInt()!=0 );
}
if ( uVer>=0x124 )
tQuery.m_iExpansionLimit = tReq.GetInt();
// extension v.1
tQuery.m_eCollation = GlobalCollation ();
if ( uMasterVer>=1 )
tQuery.m_eCollation = (ESphCollation)tReq.GetDword();
// extension v.2
if ( uMasterVer>=2 )
{
tQuery.m_sOuterOrderBy = tReq.GetString();
if ( tQuery.m_bHasOuter )
tQuery.m_iOuterLimit = tReq.GetInt();
}
if ( uMasterVer>=6 )
tQuery.m_iGroupbyLimit = tReq.GetInt();
if ( uMasterVer>=14 )
{
tQuery.m_sUDRanker = tReq.GetString();
tQuery.m_sUDRankerOpts = tReq.GetString();
}
if ( uMasterVer>=14 || uVer>=0x120 )
{
tQuery.m_sQueryTokenFilterLib = tReq.GetString();
tQuery.m_sQueryTokenFilterName = tReq.GetString();
tQuery.m_sQueryTokenFilterOpts = tReq.GetString();
}
if ( uVer>=0x121 )
{
tQuery.m_dFilterTree.Resize ( tReq.GetInt() );
for ( FilterTreeItem_t &tItem : tQuery.m_dFilterTree )
{
tItem.m_iLeft = tReq.GetInt();
tItem.m_iRight = tReq.GetInt();
tItem.m_iFilterItem = tReq.GetInt();
tItem.m_bOr = ( tReq.GetInt()!=0 );
}
}
if ( uMasterVer>=15 )
{
tQuery.m_dItems.Resize ( tReq.GetInt() );
for ( CSphQueryItem &tItem : tQuery.m_dItems )
{
tItem.m_sAlias = tReq.GetString();
tItem.m_sExpr = tReq.GetString();
tItem.m_eAggrFunc = (ESphAggrFunc)tReq.GetDword();
}
tQuery.m_dRefItems.Resize ( tReq.GetInt() );
for ( CSphQueryItem &tItem : tQuery.m_dRefItems )
{
tItem.m_sAlias = tReq.GetString();
tItem.m_sExpr = tReq.GetString();
tItem.m_eAggrFunc = (ESphAggrFunc)tReq.GetDword();
}
}
if ( uMasterVer>=16 )
tQuery.m_eExpandKeywords = (QueryOption_e)tReq.GetDword();
// pre-v.20 had old-style index hints, but they were not documented anyway
if ( uMasterVer>=20 )
{
tQuery.m_dIndexHints.Resize ( tReq.GetDword() );
for ( auto & i : tQuery.m_dIndexHints )
{
i.m_sIndex = tReq.GetString();
i.m_eType = (SecondaryIndexType_e)tReq.GetDword();
i.m_bForce = !!tReq.GetDword();
}
}
if ( uMasterVer>=21 )
{
tQuery.m_eJoinType = (JoinType_e)tReq.GetDword();
tQuery.m_sJoinIdx = tReq.GetString();
tQuery.m_sJoinQuery = tReq.GetString();
tQuery.m_dOnFilters.Resize ( tReq.GetDword() );
for ( auto & i : tQuery.m_dOnFilters )
{
i.m_sIdx1 = tReq.GetString();
i.m_sAttr1 = tReq.GetString();
i.m_sIdx2 = tReq.GetString();
i.m_sAttr2 = tReq.GetString();
if ( uMasterVer>=22 )
{
i.m_eTypeCast1 = (ESphAttr)tReq.GetInt();
i.m_eTypeCast2 = (ESphAttr)tReq.GetInt();
}
}
}
if ( uMasterVer>=22 )
{
tQuery.m_sKNNAttr = tReq.GetString();
if ( !tQuery.m_sKNNAttr.IsEmpty() )
{
tQuery.m_iKNNK = tReq.GetInt();
tQuery.m_iKnnEf = tReq.GetInt();
tQuery.m_dKNNVec.Resize ( tReq.GetInt() );
for ( auto & i : tQuery.m_dKNNVec )
i = tReq.GetFloat();
}
}
if ( uMasterVer>=23 )
tQuery.m_eJiebaMode = (JiebaMode_e)tReq.GetInt();
/////////////////////
// additional checks
/////////////////////
// queries coming from API may not request docids
// but we still need docids when sending result sets
AddDocids ( tQuery.m_dItems );
AddDocids ( tQuery.m_dRefItems );
if ( tReq.GetError() )
{
SendErrorReply ( tOut, "invalid or truncated request" );
return false;
}
CheckQuery ( tQuery, sError );
if ( !sError.IsEmpty() )
{
SendErrorReply ( tOut, "%s", sError.cstr() );
return false;
}
// now prepare it for the engine
tQuery.m_sQuery = tQuery.m_sRawQuery;
if ( tQuery.m_eQueryType!=QUERY_JSON )
PrepareQueryEmulation ( &tQuery );
FixupQuerySettings ( tQuery );
// all ok
return true;
}
//////////////////////////////////////////////////////////////////////////
using QuotationEscapedBuilder = EscapedStringBuilder_T<BaseQuotation_T<EscapeQuotator_t>>;
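/// log a completed query in the plain (query_log_format=plain) format:
/// [time], wall/real times, mode/filters/sort, indexes, perf counters, and the query text;
/// errored queries are skipped here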
void LogQueryPlain ( const CSphQuery & tQuery, const CSphQueryResultMeta & tMeta )
{
assert ( g_eLogFormat==LOG_FORMAT_PLAIN );
if ( ( !g_bQuerySyslog && g_iQueryLogFile<0 ) || !tMeta.m_sError.IsEmpty() )
return;
QuotationEscapedBuilder tBuf;
// [time]
#if USE_SYSLOG
if ( !g_bQuerySyslog )
{
#endif
tBuf << '[';
sphFormatCurrentTime ( tBuf );
tBuf << ']';
#if USE_SYSLOG
} else
tBuf += "[query]";
#endif
// querytime sec
int iQueryTime = Max ( tMeta.m_iQueryTime, 0 );
int iRealTime = Max ( tMeta.m_iRealQueryTime, 0 );
tBuf.Appendf ( " %d.%03d sec", iRealTime/1000, iRealTime%1000 );
tBuf.Appendf ( " %d.%03d sec", iQueryTime/1000, iQueryTime%1000 );
// optional multi-query multiplier
if ( tMeta.m_iMultiplier>1 )
tBuf.Appendf ( " x%d", tMeta.m_iMultiplier );
// [matchmode/numfilters/sortmode matches (offset,limit)
static const char * sModes [ SPH_MATCH_TOTAL ] = { "all", "any", "phr", "bool", "ext", "scan", "ext2" };
static const char * sSort [ SPH_SORT_TOTAL ] = { "rel", "attr-", "attr+", "tsegs", "ext", "expr" };
tBuf.Appendf ( " [%s/%d/%s " INT64_FMT " (%d,%d)",
sModes [ tQuery.m_eMode ], tQuery.m_dFilters.GetLength(), sSort [ tQuery.m_eSort ], tMeta.m_iTotalMatches,
tQuery.m_iOffset, tQuery.m_iLimit );
// optional groupby info
if ( !tQuery.m_sGroupBy.IsEmpty() )
tBuf.Appendf ( " @%s", tQuery.m_sGroupBy.cstr() );
// ] [indexes]
tBuf.Appendf ( "] [%s]", tQuery.m_sIndexes.cstr() );
// optional performance counters
if ( g_bIOStats || g_bCpuStats )
{
const CSphIOStats & IOStats = tMeta.m_tIOStats;
tBuf += " [";
if ( g_bIOStats )
tBuf.Appendf ( "ios=%d kb=%d.%d ioms=%d.%d",
IOStats.m_iReadOps, (int)( IOStats.m_iReadBytes/1024 ), (int)( IOStats.m_iReadBytes%1024 )*10/1024,
(int)( IOStats.m_iReadTime/1000 ), (int)( IOStats.m_iReadTime%1000 )/100 );
if ( g_bIOStats && g_bCpuStats )
tBuf += " ";
if ( g_bCpuStats )
tBuf.Sprintf ( "cpums=%.1D", tMeta.m_iCpuTime/100 );
tBuf += "]";
}
// optional query comment
if ( !tQuery.m_sComment.IsEmpty() )
tBuf.Appendf ( " [%s]", tQuery.m_sComment.cstr() );
// query
// (m_sRawQuery is empty when using MySQL handler)
const CSphString & sQuery = tQuery.m_sRawQuery.IsEmpty()
? tQuery.m_sQuery
: tQuery.m_sRawQuery;
if ( !sQuery.IsEmpty() )
{
tBuf += " ";
tBuf.FixupSpacesAndAppend ( sQuery.cstr() );
}
#if USE_SYSLOG
if ( !g_bQuerySyslog )
{
#endif
// line feed
tBuf += "\n";
sphSeek ( g_iQueryLogFile, 0, SEEK_END );
sphWrite ( g_iQueryLogFile, tBuf.cstr(), tBuf.GetLength() );
#if USE_SYSLOG
} else
{
syslog ( LOG_INFO, "%s", tBuf.cstr() );
}
#endif
}
namespace {
CSphString RemoveBackQuotes ( const char * pSrc )
{
CSphString sResult;
if ( !pSrc )
return sResult;
size_t iLen = strlen ( pSrc );
if ( !iLen )
return sResult;
auto szResult = new char[iLen+1];
auto * sMax = pSrc+iLen;
auto d = szResult;
while ( pSrc<sMax )
{
auto sQuote = (const char *) memchr ( pSrc, '`', sMax-pSrc );
if ( !sQuote )
sQuote = sMax;
auto iChunk = sQuote-pSrc;
memmove ( d, pSrc, iChunk );
d += iChunk;
pSrc += iChunk+1; // +1 to skip the quote
}
*d = '\0';
if ( !*szResult ) // never return an allocated but empty string; prefer to return nullptr instead
SafeDeleteArray( szResult );
sResult.Adopt ( &szResult );
return sResult;
}
}
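// append an ORDER BY-style clause reconstructed from the sort mode;
// back-quotes are stripped, and the default "@weight desc" extended sort is omitted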
static void FormatOrderBy ( StringBuilder_c * pBuf, const char * sPrefix, ESphSortOrder eSort, const CSphString & sSort )
{
assert ( pBuf );
if ( eSort==SPH_SORT_EXTENDED && sSort=="@weight desc" )
return;
const char * sSubst = "@weight";
if ( sSort!="@relevance" )
sSubst = sSort.cstr();
auto sUnquoted = RemoveBackQuotes ( sSubst );
sSubst = sUnquoted.cstr();
// for simplicity check that sPrefix is already prefixed/suffixed by spaces.
assert ( sPrefix && sPrefix[0]==' ' && sPrefix[strlen ( sPrefix )-1]==' ' );
*pBuf << sPrefix;
switch ( eSort )
{
case SPH_SORT_TIME_SEGMENTS: *pBuf << "TIME_SEGMENT(" << sSubst << ")"; break;
case SPH_SORT_EXTENDED: *pBuf << sSubst; break;
case SPH_SORT_EXPR: *pBuf << "BUILTIN_EXPR()"; break;
case SPH_SORT_RELEVANCE: *pBuf << "weight() desc"; if ( sSubst ) *pBuf << ", " << sSubst; break;
default: pBuf->Appendf ( "mode-%d", (int)eSort ); break;
}
}
static const CSphQuery g_tDefaultQuery {};
static void FormatSphinxql ( const CSphQuery & q, int iCompactIN, QuotationEscapedBuilder & tBuf );
static void FormatList ( const CSphVector<CSphNamedInt> & dValues, StringBuilder_c & tBuf )
{
ScopedComma_c tComma ( tBuf, ", " );
for ( const auto& dValue : dValues )
tBuf << dValue;
}
static void FormatOption ( const CSphQuery & tQuery, StringBuilder_c & tBuf )
{
ScopedComma_c tOptionComma ( tBuf, ", ", " OPTION ");
if ( tQuery.m_iMaxMatches!=DEFAULT_MAX_MATCHES )
tBuf.Appendf ( "max_matches=%d", tQuery.m_iMaxMatches );
if ( !tQuery.m_sComment.IsEmpty() )
tBuf.Appendf ( "comment='%s'", tQuery.m_sComment.cstr() ); // FIXME! escape, replace newlines..
if ( tQuery.m_eRanker!=SPH_RANK_DEFAULT )
{
const char * sRanker = sphGetRankerName ( tQuery.m_eRanker );
if ( !sRanker )
sRanker = sphGetRankerName ( SPH_RANK_DEFAULT );
if ( tQuery.m_sRankerExpr.IsEmpty() )
tBuf.Appendf ( "ranker=%s", sRanker );
else
tBuf.Appendf ( "ranker=%s(\'%s\')", sRanker, tQuery.m_sRankerExpr.scstr() );
}
if ( tQuery.m_iAgentQueryTimeoutMs!=DEFAULT_QUERY_TIMEOUT )
tBuf.Appendf ( "agent_query_timeout=%d", tQuery.m_iAgentQueryTimeoutMs );
if ( tQuery.m_iCutoff!=g_tDefaultQuery.m_iCutoff )
tBuf.Appendf ( "cutoff=%d", tQuery.m_iCutoff );
if ( tQuery.m_dFieldWeights.GetLength() )
{
tBuf.StartBlock (nullptr,"field_weights=(",")");
FormatList ( tQuery.m_dFieldWeights, tBuf );
tBuf.FinishBlock ();
}
if ( tQuery.m_bGlobalIDF!=g_tDefaultQuery.m_bGlobalIDF )
tBuf << "global_idf=1";
if ( tQuery.m_bPlainIDF || !tQuery.m_bNormalizedTFIDF )
{
tBuf.StartBlock(",","idf='","'");
tBuf << ( tQuery.m_bPlainIDF ? "plain" : "normalized" )
<< ( tQuery.m_bNormalizedTFIDF ? "tfidf_normalized" : "tfidf_unnormalized" );
tBuf.FinishBlock ();
}
if ( tQuery.m_bLocalDF.has_value() )
tBuf.Appendf ( "local_df=%d", tQuery.m_bLocalDF.value() ? 1 : 0 );
if ( tQuery.m_dIndexWeights.GetLength() )
{
tBuf.StartBlock ( nullptr, "index_weights=(", ")" );
FormatList ( tQuery.m_dIndexWeights, tBuf );
tBuf.FinishBlock ();
}
if ( tQuery.m_uMaxQueryMsec!=g_tDefaultQuery.m_uMaxQueryMsec )
tBuf.Appendf ( "max_query_time=%u", tQuery.m_uMaxQueryMsec );
if ( tQuery.m_iMaxPredictedMsec!=g_tDefaultQuery.m_iMaxPredictedMsec )
tBuf.Appendf ( "max_predicted_time=%d", tQuery.m_iMaxPredictedMsec );
if ( tQuery.m_iRetryCount!=DEFAULT_QUERY_RETRY )
tBuf.Appendf ( "retry_count=%d", tQuery.m_iRetryCount );
if ( tQuery.m_iRetryDelay!=DEFAULT_QUERY_RETRY )
tBuf.Appendf ( "retry_delay=%d", tQuery.m_iRetryDelay );
if ( tQuery.m_iRandSeed!=g_tDefaultQuery.m_iRandSeed )
tBuf.Appendf ( "rand_seed=" INT64_FMT, tQuery.m_iRandSeed );
if ( !tQuery.m_sQueryTokenFilterLib.IsEmpty() )
{
if ( tQuery.m_sQueryTokenFilterOpts.IsEmpty() )
tBuf.Appendf ( "token_filter = '%s:%s'", tQuery.m_sQueryTokenFilterLib.cstr(), tQuery.m_sQueryTokenFilterName.cstr() );
else
tBuf.Appendf ( "token_filter = '%s:%s:%s'", tQuery.m_sQueryTokenFilterLib.cstr(), tQuery.m_sQueryTokenFilterName.cstr(), tQuery.m_sQueryTokenFilterOpts.cstr() );
}
if ( tQuery.m_bIgnoreNonexistent )
tBuf << "ignore_nonexistent_columns=1";
if ( tQuery.m_bIgnoreNonexistentIndexes )
tBuf << "ignore_nonexistent_indexes=1";
if ( tQuery.m_bStrict )
tBuf << "strict=1";
if ( tQuery.m_eExpandKeywords!=QUERY_OPT_DEFAULT && tQuery.m_eExpandKeywords!=QUERY_OPT_MORPH_NONE )
tBuf.Appendf ( "expand_keywords=%d", ( tQuery.m_eExpandKeywords==QUERY_OPT_ENABLED ? 1 : 0 ) );
if ( tQuery.m_eExpandKeywords==QUERY_OPT_MORPH_NONE )
tBuf.Appendf ( "morphology=none" );
if ( tQuery.m_iExpansionLimit!=DEFAULT_QUERY_EXPANSION_LIMIT )
tBuf.Appendf ( "expansion_limit=%d", tQuery.m_iExpansionLimit );
}
static CSphString GenerateHintName ( const IndexHint_t & tHint )
{
CSphString sName;
switch ( tHint.m_eType )
{
case SecondaryIndexType_e::FILTER: sName = "Filter"; break;
case SecondaryIndexType_e::LOOKUP: sName = "DocidIndex"; break;
case SecondaryIndexType_e::INDEX: sName = "SecondaryIndex"; break;
case SecondaryIndexType_e::ANALYZER: sName = "ColumnarScan"; break;
default: sName = "None"; break;
}
if ( !tHint.m_bForce )
sName.SetSprintf ( "NO_%s", sName.cstr() );
return sName;
}
static void AppendHint ( const IndexHint_t & tHint, const StrVec_t & dIndexes, StringBuilder_c & tBuf )
{
CSphString sName;
sName.SetSprintf ( " %s (", GenerateHintName(tHint).cstr() );
ScopedComma_c tComma ( tBuf, ",", sName.cstr(), ")" );
for ( const auto & sIndex : dIndexes )
tBuf << sIndex;
}
static void FormatIndexHints ( const CSphQuery & tQuery, StringBuilder_c & tBuf )
{
if ( !tQuery.m_dIndexHints.GetLength() )
return;
ScopedComma_c sMatch ( tBuf, nullptr );
CSphVector<bool> dUsed { tQuery.m_dIndexHints.GetLength() };
dUsed.ZeroVec();
tBuf << " /*+ ";
ARRAY_FOREACH ( i, tQuery.m_dIndexHints )
{
if ( dUsed[i] )
continue;
StrVec_t dIndexes;
dIndexes.Add ( tQuery.m_dIndexHints[i].m_sIndex );
for ( int j = i+1; j<tQuery.m_dIndexHints.GetLength(); j++)
if ( !dUsed[j] && tQuery.m_dIndexHints[i].m_eType==tQuery.m_dIndexHints[j].m_eType && tQuery.m_dIndexHints[i].m_bForce==tQuery.m_dIndexHints[j].m_bForce )
{
dIndexes.Add ( tQuery.m_dIndexHints[j].m_sIndex );
dUsed[j] = true;
}
AppendHint ( tQuery.m_dIndexHints[i], dIndexes, tBuf );
}
tBuf << " */";
}
static void LogQueryJson ( const CSphQuery & q, StringBuilder_c & tBuf )
{
if ( q.m_sRawQuery.IsEmpty() )
tBuf << " /*" << "{\"index\":\"" << q.m_sIndexes << "\"}*/ /*" << q.m_sQuery << " */";
else
tBuf << " /*" << q.m_sRawQuery << " */";
}
inline static void FormatTimeConnClient ( StringBuilder_c& tBuf )
{
sphFormatCurrentTime ( tBuf );
tBuf << " conn " << session::GetConnID() << " (" << session::szClientName() << ")";
}
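/// log a query in the sphinxql (query_log_format=sphinxql) format: a leading
/// /* time, conn, timings, found */ comment, the query reconstructed as a SELECT
/// (or the raw json query in comments), then a trailing /* stats */ comment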
static void LogQuerySphinxql ( const CSphQuery & q, const CSphQueryResultMeta & tMeta, const CSphVector<int64_t> & dAgentTimes )
{
assert ( g_eLogFormat==LOG_FORMAT_SPHINXQL );
if ( g_iQueryLogFile<0 )
return;
QuotationEscapedBuilder tBuf;
int iCompactIN = ( g_bLogCompactIn ? LOG_COMPACT_IN : 0 );
// time, conn id, wall, found
int iQueryTime = Max ( tMeta.m_iQueryTime, 0 );
int iRealTime = Max ( tMeta.m_iRealQueryTime, 0 );
tBuf << "/* ";
FormatTimeConnClient ( tBuf );
tBuf << " real " << FixedFrac ( iRealTime ) << " wall " << FixedFrac ( iQueryTime );
if ( tMeta.m_iMultiplier>1 )
tBuf << " x" << tMeta.m_iMultiplier;
tBuf << " found " << tMeta.m_iTotalMatches << " */ ";
///////////////////////////////////
// format request as SELECT query
///////////////////////////////////
if ( q.m_eQueryType==QUERY_JSON )
LogQueryJson ( q, tBuf );
else
FormatSphinxql ( q, iCompactIN, tBuf );
///////////////
// query stats
///////////////
// next block is enclosed in /* .. */, space-separated
tBuf.StartBlock ( " ", " /*", " */" );
if ( !tMeta.m_sError.IsEmpty() )
{
// all we have is an error
tBuf.Appendf ( "error=%s", tMeta.m_sError.cstr() );
} else
{
// performance counters
if ( g_bIOStats || g_bCpuStats )
{
const CSphIOStats & IOStats = tMeta.m_tIOStats;
if ( g_bIOStats )
tBuf.Sprintf ( "ios=%d kb=%d.%d ioms=%.1D",
IOStats.m_iReadOps, (int)( IOStats.m_iReadBytes/1024 ), (int)( IOStats.m_iReadBytes%1024 )*10/1024,
IOStats.m_iReadTime/100 );
if ( g_bCpuStats )
tBuf.Sprintf ( "cpums=%.1D", tMeta.m_iCpuTime/100 );
}
// per-agent times
if ( dAgentTimes.GetLength() )
{
ScopedComma_c dAgents ( tBuf, ", ", " agents=(",")");
for ( auto iTime : dAgentTimes )
tBuf.Appendf ( "%d.%03d",
(int)( iTime/1000),
(int)( iTime%1000) );
}
// merged stats
if ( tMeta.m_hWordStats.GetLength() && ( tMeta.m_tExpansionStats.m_iTerms || tMeta.m_tExpansionStats.m_iMerged ) )
tBuf.Appendf ( "terms expansion=(merged %d, not merged %d)", tMeta.m_tExpansionStats.m_iMerged, tMeta.m_tExpansionStats.m_iTerms );
// warning
if ( !tMeta.m_sWarning.IsEmpty() )
tBuf.Appendf ( "warning=%s", tMeta.m_sWarning.cstr() );
}
tBuf.FinishBlock (); // close the comment
// line feed
tBuf += "\n";
sphSeek ( g_iQueryLogFile, 0, SEEK_END );
sphWrite ( g_iQueryLogFile, tBuf.cstr(), tBuf.GetLength() );
}
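/// reconstruct the query as a SphinxQL SELECT statement (used for query logging)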
void FormatSphinxql ( const CSphQuery & q, int iCompactIN, QuotationEscapedBuilder & tBuf )
{
if ( q.m_bHasOuter )
tBuf << "SELECT * FROM (";
if ( q.m_sSelect.IsEmpty() )
tBuf << "SELECT * FROM " << q.m_sIndexes;
else
tBuf << "SELECT " << RemoveBackQuotes ( q.m_sSelect.cstr() ) << " FROM " << q.m_sIndexes;
// WHERE clause
// (m_sRawQuery is empty when using MySQL handler)
const CSphString & sQuery = q.m_sQuery;
if ( !sQuery.IsEmpty() || q.m_dFilters.GetLength() )
{
ScopedComma_c sWHERE ( tBuf, " AND ", " WHERE ");
if ( !sQuery.IsEmpty() )
{
ScopedComma_c sMatch (tBuf, nullptr, "MATCH(", ")");
tBuf.FixupSpacedAndAppendEscaped ( sQuery.cstr() );
}
FormatFiltersQL ( q.m_dFilters, q.m_dFilterTree, tBuf, iCompactIN );
}
// ORDER BY and/or GROUP BY clause
if ( q.m_sGroupBy.IsEmpty() )
{
if ( !q.m_sSortBy.IsEmpty() ) // case API SPH_MATCH_EXTENDED2 - SPH_SORT_RELEVANCE
FormatOrderBy ( &tBuf, " ORDER BY ", q.m_eSort, q.m_sSortBy );
} else
{
tBuf << " GROUP BY " << q.m_sGroupBy;
FormatOrderBy ( &tBuf, " WITHIN GROUP ORDER BY ", q.m_eSort, q.m_sSortBy );
if ( !q.m_tHaving.m_sAttrName.IsEmpty() )
{
ScopedComma_c sHaving ( tBuf, nullptr, " HAVING " );
FormatFilterQL ( q.m_tHaving, tBuf, iCompactIN );
}
if ( q.m_sGroupSortBy!="@group desc" )
FormatOrderBy ( &tBuf, " ORDER BY ", SPH_SORT_EXTENDED, q.m_sGroupSortBy );
}
// LIMIT clause
if ( q.m_iOffset!=0 || q.m_iLimit!=20 )
tBuf << " LIMIT ";
if ( q.m_iOffset )
tBuf << q.m_iOffset << ',';
if ( q.m_iLimit!=20 )
tBuf << q.m_iLimit;
// OPTION clause
FormatOption ( q, tBuf );
FormatIndexHints ( q, tBuf );
// outer order by, limit
if ( q.m_bHasOuter )
{
tBuf << ')';
if ( !q.m_sOuterOrderBy.IsEmpty() )
tBuf << " ORDER BY " << q.m_sOuterOrderBy;
if ( q.m_iOuterOffset>0 )
tBuf << " LIMIT " << q.m_iOuterOffset << ", " << q.m_iOuterLimit;
else if ( q.m_iOuterLimit>0 )
tBuf << " LIMIT " << q.m_iOuterLimit;
}
// finish SQL statement
tBuf << ';';
}
static void LogQuery ( const CSphQuery & q, const CSphQueryResultMeta & tMeta, const CSphVector<int64_t> & dAgentTimes )
{
if ( g_iQueryLogMinMs>0 && tMeta.m_iQueryTime<g_iQueryLogMinMs )
return;
// queries coming from buddy should show up only at debug and more verbose log levels, not at info
bool bNoLogQuery = ( ( q.m_uDebugFlags & QUERY_DEBUG_NO_LOG )==QUERY_DEBUG_NO_LOG );
if ( bNoLogQuery && g_eLogLevel==SPH_LOG_INFO )
return;
switch ( g_eLogFormat )
{
case LOG_FORMAT_PLAIN: LogQueryPlain ( q, tMeta ); break;
case LOG_FORMAT_SPHINXQL: LogQuerySphinxql ( q, tMeta, dAgentTimes ); break;
}
}
static void WriteQuery ( const StringBuilder_c & tBuf )
{
sphSeek ( g_iQueryLogFile, 0, SEEK_END );
sphWrite ( g_iQueryLogFile, tBuf.cstr(), tBuf.GetLength() );
}
void LogSphinxqlError ( const char * sStmt, const Str_t & sError )
{
if ( g_eLogFormat!=LOG_FORMAT_SPHINXQL || g_iQueryLogFile<0 || !sStmt || IsEmpty(sError) )
return;
StringBuilder_c tBuf;
tBuf << "/* ";
FormatTimeConnClient ( tBuf );
tBuf << " */ " << sStmt << " # error=" << sError << '\n';
WriteQuery ( tBuf );
}
void LogSphinxqlError ( const Str_t & sStmt, const Str_t & sError )
{
if ( g_eLogFormat!=LOG_FORMAT_SPHINXQL || g_iQueryLogFile<0 || IsEmpty ( sStmt ) || IsEmpty ( sError ) )
return;
QuotationEscapedBuilder tBuf;
tBuf << "/* ";
FormatTimeConnClient ( tBuf );
tBuf << " */ " ;
tBuf.AppendEscaped ( sStmt.first, EscBld::eFixupSpace, sStmt.second );
tBuf << " # error=" << sError << '\n';
WriteQuery ( tBuf );
}
void LogBuddyQuery ( const Str_t sQuery, BuddyQuery_e tType )
{
if ( g_eLogFormat!=LOG_FORMAT_SPHINXQL || g_iQueryLogFile<0 || IsEmpty ( sQuery ) )
return;
const auto & tMeta = session::GetClientSession()->m_tLastMeta;
QuotationEscapedBuilder tBuf;
// time, conn id, wall, found
int iQueryTime = Max ( tMeta.m_iQueryTime, 0 );
int iRealTime = Max ( tMeta.m_iRealQueryTime, 0 );
tBuf << "/* ";
FormatTimeConnClient ( tBuf );
tBuf << " real " << FixedFrac ( iRealTime ) << " wall " << FixedFrac ( iQueryTime );
if ( tMeta.m_iMultiplier>1 )
tBuf << " x" << tMeta.m_iMultiplier;
tBuf << " found " << tMeta.m_iTotalMatches << " */ ";
if ( tType==BuddyQuery_e::HTTP )
tBuf << "/* ";
tBuf.AppendEscaped ( sQuery.first, EscBld::eFixupSpace, sQuery.second );
if ( tType==BuddyQuery_e::HTTP )
tBuf << " */";
tBuf << ";\n";
WriteQuery ( tBuf );
}
void ReportIndexesName ( int iSpanStart, int iSpanEnd, const CSphVector<SearchFailure_t> & dLog, StringBuilder_c & sOut )
{
int iSpanLen = iSpanEnd - iSpanStart;
// report the distributed index in case all failures are from its locals
if ( iSpanLen>1 && !dLog[iSpanStart].m_sParentIndex.IsEmpty ()
&& dLog[iSpanStart].m_sParentIndex==dLog[iSpanEnd-1].m_sParentIndex )
{
auto pDist = GetDistr ( dLog[iSpanStart].m_sParentIndex );
if ( pDist && pDist->m_dLocal.GetLength ()==iSpanLen )
{
sOut << "index " << dLog[iSpanStart].m_sParentIndex << ": ";
return;
}
}
// report only the first indexes, up to 4 of them
int iEndReport = ( iSpanLen>4 ) ? iSpanStart+3 : iSpanEnd;
sOut.StartBlock ( ",", "table " );
for ( int j = iSpanStart; j<iEndReport; ++j )
sOut << dLog[j].m_sIndex;
sOut.FinishBlock ();
// add the total index count
if ( iEndReport!=iSpanEnd )
sOut.Sprintf ( " and %d more: ", iSpanEnd-iEndReport );
else
sOut += ": ";
}
static void LogStatementSphinxql ( Str_t sQuery, int iRealTime )
{
if ( g_iQueryLogFile<0 || g_eLogFormat!=LOG_FORMAT_SPHINXQL || !IsFilled ( sQuery ) )
return;
if ( session::IsQueryLogDisabled() && g_eLogLevel==SPH_LOG_INFO )
return;
StringBuilder_c tBuf;
tBuf << "/* ";
FormatTimeConnClient ( tBuf );
tBuf << " real " << FixedFrac ( iRealTime ) << " */ "
// query
<< sQuery
// finish statement and line feed
<< ";\n";
sphSeek ( g_iQueryLogFile, 0, SEEK_END );
sphWrite ( g_iQueryLogFile, tBuf.cstr(), tBuf.GetLength() );
}
static int64_t LogFilterStatementSphinxql ( Str_t sQuery, SqlStmt_e eStmt )
{
if ( g_tLogStatements.IsEmpty() )
return 0;
if ( !g_tLogStatements.BitGet ( eStmt ) )
return 0;
int64_t tmStarted = sphMicroTimer();
LogStatementSphinxql ( sQuery, 0 );
return tmStarted;
}
//////////////////////////////////////////////////////////////////////////
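// build a bitmask of attributes to send over the network: internal attrs are never sent;
// for plain (non-agent) clients, trailing internal sort-string attrs are trimmed,
// and the docid may be hidden as well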
void sphGetAttrsToSend ( const ISphSchema & tSchema, bool bAgentMode, bool bNeedId, CSphBitvec & tAttrs )
{
int iCount = tSchema.GetAttrsCount();
tAttrs.Init ( iCount );
if ( !bAgentMode && iCount && IsSortStringInternal ( tSchema.GetAttr ( iCount-1 ).m_sName ) )
{
for ( int i=iCount-1; i>=0 && IsSortStringInternal ( tSchema.GetAttr(i).m_sName ); --i )
iCount = i;
}
for ( int i = 0; i < iCount; ++i )
if ( !sphIsInternalAttr ( tSchema.GetAttr(i) ) )
tAttrs.BitSet(i);
int iId = tSchema.GetAttrIndex ( sphGetDocidName() );
if ( !bAgentMode && iId!=-1 && !bNeedId )
tAttrs.BitClear(iId);
}
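// helpers below unpack ptr-attr payloads and serialize them into the API wire format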
static void SendDataPtrAttr ( ISphOutputBuffer& tOut, const BYTE * pData )
{
auto dData = sphUnpackPtrAttr ( pData );
tOut.SendArray ( dData );
}
static char g_sJsonNull[] = "{}";
static void SendJsonAsString ( ISphOutputBuffer& tOut, const BYTE * pJSON )
{
if ( pJSON )
{
auto dData = sphUnpackPtrAttr ( pJSON );
JsonEscapedBuilder dJson;
dJson.GrowEnough ( dData.second * 2 );
sphJsonFormat ( dJson, dData.first );
tOut.SendArray ( dJson );
} else
// magic zero - "{}"
tOut.SendArray ( g_sJsonNull, sizeof ( g_sJsonNull )-1 );
}
static void SendJson ( ISphOutputBuffer& tOut, const BYTE * pJSON, bool bSendJson )
{
if ( bSendJson )
SendDataPtrAttr ( tOut, pJSON ); // send BSON
else
SendJsonAsString ( tOut, pJSON ); // send string
}
static void SendJsonFieldAsString ( ISphOutputBuffer& tOut, const BYTE * pJSON )
{
if ( !pJSON )
{
tOut.SendDword(0);
return;
}
auto dData = sphUnpackPtrAttr ( pJSON );
auto eJson = (ESphJsonType) *dData.first++;
JsonEscapedBuilder dJson;
dJson.GrowEnough ( dData.second * 2 );
sphJsonFieldFormat ( dJson, dData.first, eJson, false );
tOut.SendArray ( dJson );
}
static void SendJsonField ( ISphOutputBuffer& tOut, const BYTE * pJSON, bool bSendJsonField )
{
if ( !bSendJsonField )
{
SendJsonFieldAsString ( tOut, pJSON );
return;
}
auto dData = sphUnpackPtrAttr ( pJSON );
if ( IsEmpty ( dData ) || *dData.first==JSON_EOF )
tOut.SendByte ( JSON_EOF );
else
{
tOut.SendByte ( *dData.first );
tOut.SendArray ( dData.first+1, dData.second-1 );
}
}
static void SendMVA ( ISphOutputBuffer& tOut, const BYTE * pMVA, bool b64bit )
{
if ( !pMVA )
{
tOut.SendDword ( 0 );
return;
}
auto dMVA = sphUnpackPtrAttr ( pMVA );
DWORD uValues = dMVA.second / sizeof(DWORD);
tOut.SendDword(uValues);
const auto * pValues = (const DWORD *) dMVA.first;
if ( b64bit )
{
assert ( ( uValues%2 )==0 );
while ( uValues )
{
auto uMVA = MVA_BE ( pValues );
tOut.SendDword ( uMVA.first );
tOut.SendDword ( uMVA.second );
pValues += 2;
uValues -= 2;
}
} else
{
while ( uValues-- )
tOut.SendDword ( *pValues++ );
}
}
static void SendFloatVec ( ISphOutputBuffer & tOut, const BYTE * pData )
{
if ( !pData )
{
tOut.SendDword(0);
return;
}
auto dFloatVec = sphUnpackPtrAttr ( pData );
DWORD uValues = dFloatVec.second / sizeof(float);
tOut.SendDword(uValues);
auto pValues = (const float *) dFloatVec.first;
while ( uValues-- )
tOut.SendFloat ( *pValues++ );
}
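// map in-memory attr types to wire types depending on protocol and master version:
// *_PTR flavors go out as plain types, and json/double/uint64 degrade to
// string/float/bigint for older peers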
static ESphAttr FixupAttrForNetwork ( const CSphColumnInfo & tCol, const CSphSchema & tSchema, int iVer, WORD uMasterVer, bool bAgentMode )
{
bool bSendJson = ( bAgentMode && uMasterVer>=3 );
bool bSendJsonField = ( bAgentMode && uMasterVer>=4 );
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_UINT32SET_PTR:
return SPH_ATTR_UINT32SET;
case SPH_ATTR_INT64SET_PTR:
return SPH_ATTR_INT64SET;
case SPH_ATTR_FLOAT_VECTOR_PTR:
return SPH_ATTR_FLOAT_VECTOR;
case SPH_ATTR_STRINGPTR:
{
if ( bAgentMode && uMasterVer>=18 && IsNotRealAttribute ( tCol ) )
return SPH_ATTR_STORED_FIELD;
else
return SPH_ATTR_STRING;
}
case SPH_ATTR_JSON:
case SPH_ATTR_JSON_PTR:
return bSendJson ? SPH_ATTR_JSON : SPH_ATTR_STRING;
case SPH_ATTR_JSON_FIELD:
case SPH_ATTR_JSON_FIELD_PTR:
return bSendJsonField ? SPH_ATTR_JSON_FIELD : SPH_ATTR_STRING;
case SPH_ATTR_DOUBLE:
return iVer<0x122 ? SPH_ATTR_FLOAT : SPH_ATTR_DOUBLE;
case SPH_ATTR_UINT64:
return iVer<0x123 ? SPH_ATTR_BIGINT : SPH_ATTR_UINT64;
default: return tCol.m_eAttrType;
}
}
static void SendSchema ( ISphOutputBuffer & tOut, const AggrResult_t & tRes, const CSphBitvec & tAttrsToSend, int iVer, WORD uMasterVer, bool bAgentMode )
{
int iFieldsCount = tRes.m_tSchema.GetFieldsCount();
tOut.SendInt ( iFieldsCount );
for ( int i=0; i < iFieldsCount; ++i )
tOut.SendString ( tRes.m_tSchema.GetFieldName(i) );
tOut.SendInt ( tAttrsToSend.BitCount() );
for ( int i=0; i<tRes.m_tSchema.GetAttrsCount(); ++i )
{
if ( !tAttrsToSend.BitGet(i) )
continue;
const CSphColumnInfo & tCol = tRes.m_tSchema.GetAttr(i);
tOut.SendString ( tCol.m_sName.cstr() );
ESphAttr eCol = FixupAttrForNetwork ( tCol, tRes.m_tSchema, iVer, uMasterVer, bAgentMode );
tOut.SendDword ( (DWORD)eCol );
}
}
static void SendAttribute ( ISphOutputBuffer & tOut, const CSphMatch & tMatch, const CSphColumnInfo & tAttr, int iVer, WORD uMasterVer, bool bAgentMode )
{
// at this point we should not have any attributes that point to pooled data
assert ( sphPlainAttrToPtrAttr(tAttr.m_eAttrType)==tAttr.m_eAttrType );
// send binary json only to master
bool bSendJson = bAgentMode && uMasterVer>=3;
bool bSendJsonField = bAgentMode && uMasterVer>=4;
const CSphAttrLocator & tLoc = tAttr.m_tLocator;
switch ( tAttr.m_eAttrType )
{
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET_PTR:
SendMVA ( tOut, (const BYTE*)tMatch.GetAttr(tLoc), tAttr.m_eAttrType==SPH_ATTR_INT64SET_PTR );
break;
case SPH_ATTR_FLOAT_VECTOR_PTR:
SendFloatVec ( tOut, (const BYTE*)tMatch.GetAttr(tLoc) );
break;
case SPH_ATTR_JSON_PTR:
SendJson ( tOut, (const BYTE*)tMatch.GetAttr(tLoc), bSendJson );
break;
case SPH_ATTR_STRINGPTR:
SendDataPtrAttr ( tOut, (const BYTE*)tMatch.GetAttr(tLoc) );
break;
case SPH_ATTR_JSON_FIELD_PTR:
SendJsonField ( tOut, (const BYTE*)tMatch.GetAttr(tLoc), bSendJsonField );
break;
case SPH_ATTR_FACTORS:
case SPH_ATTR_FACTORS_JSON:
if ( iVer<0x11C )
{
tOut.SendDword ( 0 );
break;
}
SendDataPtrAttr ( tOut, (const BYTE*)tMatch.GetAttr(tLoc) );
break;
case SPH_ATTR_FLOAT:
tOut.SendFloat ( tMatch.GetAttrFloat(tLoc) );
break;
case SPH_ATTR_DOUBLE:
if ( iVer<0x122 )
tOut.SendFloat ( (float)tMatch.GetAttrDouble(tLoc) );
else
tOut.SendDouble ( tMatch.GetAttrDouble(tLoc) );
break;
case SPH_ATTR_BIGINT:
case SPH_ATTR_UINT64:
tOut.SendUint64 ( tMatch.GetAttr(tLoc) );
break;
default:
tOut.SendDword ( (DWORD)tMatch.GetAttr(tLoc) );
break;
}
}
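/// serialize a single result set into the API wire format: status, schema, matches,
/// totals, optional stats, and per-word statistics;
/// agent-mode extras must match SearchReplyParser_c::ParseReply()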
void SendResult ( int iVer, ISphOutputBuffer & tOut, const AggrResult_t& tRes, bool bAgentMode, const CSphQuery & tQuery, WORD uMasterVer )
{
// multi-query status
bool bError = !tRes.m_sError.IsEmpty();
bool bWarning = !bError && !tRes.m_sWarning.IsEmpty();
bError |= tRes.m_dResults.IsEmpty() && tQuery.m_bFacet;
assert ( bError || tRes.m_bSingle );
assert ( bError || tRes.m_bOneSchema );
if ( bError )
{
tOut.SendInt ( SEARCHD_ERROR ); // FIXME! maybe use APICommand_t and refactor to the common API way
tOut.SendString ( tRes.m_sError.cstr() );
if ( g_bOptNoDetach && g_eLogFormat!=LOG_FORMAT_SPHINXQL )
sphInfo ( "query error: %s", tRes.m_sError.cstr() );
return;
} else if ( bWarning )
{
tOut.SendDword ( SEARCHD_WARNING );
tOut.SendString ( tRes.m_sWarning.cstr() );
if ( g_bOptNoDetach && g_eLogFormat!=LOG_FORMAT_SPHINXQL )
sphInfo ( "query warning: %s", tRes.m_sWarning.cstr() );
} else
tOut.SendDword ( SEARCHD_OK );
CSphBitvec tAttrsToSend;
sphGetAttrsToSend ( tRes.m_tSchema, bAgentMode, false, tAttrsToSend );
// send schema
SendSchema ( tOut, tRes, tAttrsToSend, iVer, uMasterVer, bAgentMode );
// send matches
tOut.SendInt ( tRes.m_iCount );
tOut.SendInt ( 1 ); // was USE_64BIT
CSphVector<BYTE> dJson ( 512 );
auto& dResult = tRes.m_dResults.First();
auto dMatches = dResult.m_dMatches.Slice ( tRes.m_iOffset, tRes.m_iCount );
for ( const CSphMatch & tMatch : dMatches )
{
Verify ( tRes.m_tSchema.GetAttr(sphGetDocidName()) );
tOut.SendUint64 ( sphGetDocID(tMatch.m_pDynamic) );
tOut.SendInt ( tMatch.m_iWeight );
assert ( tMatch.m_pStatic || !tRes.m_tSchema.GetStaticSize() );
#if 0
// not correct any more because of internal attrs (such as string sorting ptrs)
assert ( tMatch.m_pDynamic || !pRes->m_tSchema.GetDynamicSize() );
assert ( !tMatch.m_pDynamic || (int)tMatch.m_pDynamic[-1]==pRes->m_tSchema.GetDynamicSize() );
#endif
for ( int j=0; j<tRes.m_tSchema.GetAttrsCount(); ++j )
if ( tAttrsToSend.BitGet(j) )
SendAttribute ( tOut, tMatch, tRes.m_tSchema.GetAttr(j), iVer, uMasterVer, bAgentMode );
}
if ( tQuery.m_bAgent && tQuery.m_iLimit )
tOut.SendInt ( tRes.m_iCount );
else
tOut.SendInt ( dResult.m_dMatches.GetLength() );
tOut.SendAsDword ( tRes.m_iTotalMatches );
if ( bAgentMode && uMasterVer>=19 )
tOut.SendInt ( tRes.m_bTotalMatchesApprox ? 1 : 0 );
tOut.SendInt ( Max ( tRes.m_iQueryTime, 0 ) );
if ( iVer>=0x11A && bAgentMode )
{
bool bNeedPredictedTime = tQuery.m_iMaxPredictedMsec > 0;
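// stats mask bits: 1 - IO stats, 2 - CPU stats, 4 - predicted time; the flagged blocks follow in that order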
BYTE uStatMask = ( bNeedPredictedTime ? 4U : 0U ) | ( g_bCpuStats ? 2U : 0U ) | ( g_bIOStats ? 1U : 0U );
tOut.SendByte ( uStatMask );
if ( g_bIOStats )
{
CSphIOStats tStats = tRes.m_tIOStats;
tStats.Add ( tRes.m_tAgentIOStats );
tOut.SendUint64 ( tStats.m_iReadTime );
tOut.SendDword ( tStats.m_iReadOps );
tOut.SendUint64 ( tStats.m_iReadBytes );
tOut.SendUint64 ( tStats.m_iWriteTime );
tOut.SendDword ( tStats.m_iWriteOps );
tOut.SendUint64 ( tStats.m_iWriteBytes );
}
if ( g_bCpuStats )
{
int64_t iCpuTime = tRes.m_iCpuTime + tRes.m_iAgentCpuTime;
tOut.SendUint64 ( iCpuTime );
}
if ( bNeedPredictedTime )
tOut.SendUint64 ( tRes.m_iPredictedTime + tRes.m_iAgentPredictedTime );
}
if ( bAgentMode && uMasterVer>=7 )
{
tOut.SendDword ( tRes.m_tStats.m_iFetchedDocs + tRes.m_iAgentFetchedDocs );
tOut.SendDword ( tRes.m_tStats.m_iFetchedHits + tRes.m_iAgentFetchedHits );
if ( uMasterVer>=8 )
tOut.SendDword ( tRes.m_tStats.m_iSkips + tRes.m_iAgentFetchedSkips );
}
auto dWords = tRes.MakeSortedWordStat ();
tOut.SendInt ( dWords.GetLength() );
for( auto * pWord : dWords )
{
assert ( pWord );
tOut.SendString ( pWord->first.cstr () );
tOut.SendAsDword ( pWord->second.first );
tOut.SendAsDword ( pWord->second.second );
if ( bAgentMode )
tOut.SendByte ( 0 ); // statistics have no expanded terms for now
}
}
/////////////////////////////////////////////////////////////////////////////
int AggrResult_t::GetLength () const
{
int iCount = 0;
m_dResults.Apply ( [&iCount] ( const OneResultset_t & a ) { iCount += a.m_dMatches.GetLength (); } );
return iCount;
}
bool AggrResult_t::AddResultset ( ISphMatchSorter * pQueue, const DocstoreReader_i * pDocstore, int iTag, int iCutoff )
{
assert ( pQueue );
if ( !pQueue->GetLength () )
{
m_tSchema = *pQueue->GetSchema ();
return false;
}
// extract matches from sorter
auto & tOneRes = m_dResults.Add ();
tOneRes.m_pDocstore = pDocstore;
tOneRes.m_iTag = iTag;
tOneRes.FillFromSorter ( pQueue );
// in MT case each thread has its own cutoff, so we have to enforce it again on the result set
if ( iCutoff>0 )
{
m_iTotalMatches = Min ( iCutoff, m_iTotalMatches );
tOneRes.ClampMatches(iCutoff);
}
return true;
}
void AggrResult_t::AddEmptyResultset ( const DocstoreReader_i * pDocstore, int iTag )
{
auto & tOneRes = m_dResults.Add();
tOneRes.m_pDocstore = pDocstore;
tOneRes.m_iTag = iTag;
tOneRes.m_tSchema = m_tSchema;
}
void AggrResult_t::ClampMatches ( int iLimit )
{
assert ( m_bSingle );
m_dResults.First ().ClampMatches ( iLimit );
}
void AggrResult_t::ClampAllMatches ()
{
for ( auto& dResult : m_dResults )
dResult.ClampAllMatches();
}
int OneResultset_t::FillFromSorter ( ISphMatchSorter * pQueue )
{
if ( !pQueue )
return 0;
assert ( m_dMatches.IsEmpty () );
m_tSchema = *pQueue->GetSchema ();
if ( !pQueue->GetLength () )
return 0;
int iCopied = pQueue->Flatten ( m_dMatches.AddN ( pQueue->GetLength () ) );
m_dMatches.Resize ( iCopied );
return iCopied;
}
void OneResultset_t::ClampAllMatches ()
{
for ( auto& dMatch : m_dMatches )
{
m_tSchema.FreeDataPtrs ( dMatch );
dMatch.ResetDynamic();
}
m_dMatches.Reset();
}
void OneResultset_t::ClampMatches ( int iLimit )
{
assert ( iLimit>0 );
int iMatches = m_dMatches.GetLength ();
for ( int i = iLimit; i<iMatches; ++i )
{
m_tSchema.FreeDataPtrs ( m_dMatches[i] );
m_dMatches[i].ResetDynamic();
}
m_dMatches.Resize ( Min (iMatches, iLimit ) );
}
OneResultset_t::~OneResultset_t()
{
ClampAllMatches();
}
namespace { // static
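// bring every per-resultset match to the unified schema of the aggregated result:
// for each source match we allocate a new dynamic row sized for the target schema,
// copy the attrs over by name, and free the leftover per-match data we don't carry along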
void RemapResult ( AggrResult_t & dResult )
{
const ISphSchema & tSchema = dResult.m_tSchema;
int iAttrsCount = tSchema.GetAttrsCount();
CSphVector<int> dMapFrom(iAttrsCount);
CSphVector<int> dRowItems(iAttrsCount);
static const int SIZE_OF_ROW = 8 * sizeof ( CSphRowitem );
for ( auto & tRes : dResult.m_dResults )
{
if ( tRes.m_dMatches.IsEmpty() )
continue;
dMapFrom.Resize ( 0 );
dRowItems.Resize ( 0 );
CSphSchema & dSchema = tRes.m_tSchema;
for ( int i = 0; i<iAttrsCount; ++i )
{
auto iSrcCol = dSchema.GetAttrIndex ( tSchema.GetAttr ( i ).m_sName.cstr () );
dMapFrom.Add ( iSrcCol );
dRowItems.Add ( dSchema.GetAttr ( iSrcCol ).m_tLocator.m_iBitOffset / SIZE_OF_ROW );
assert ( dMapFrom[i]>=0
|| IsSortStringInternal ( tSchema.GetAttr(i).m_sName )
|| IsSortJsonInternal ( tSchema.GetAttr(i).m_sName )
);
}
// invert dRowItems - we'll free only those NOT enumerated yet
dRowItems = dSchema.SubsetPtrs ( dRowItems );
for ( auto& tMatch : tRes.m_dMatches )
{
// create new and shiny (and properly sized) match
CSphMatch tNewMatch;
tNewMatch.Reset ( tSchema.GetDynamicSize () );
tNewMatch.m_tRowID = tMatch.m_tRowID;
tNewMatch.m_iWeight = tMatch.m_iWeight;
// remap attrs
for ( int j = 0; j<iAttrsCount; ++j )
{
const CSphColumnInfo & tDst = tSchema.GetAttr ( j );
// we could keep some of the rows static
// and so, avoid the duplication of the data.
int iMapFrom = dMapFrom[j];
const CSphColumnInfo & tSrc = dSchema.GetAttr ( iMapFrom );
if ( !tDst.m_tLocator.m_bDynamic )
{
assert ( iMapFrom<0 || !dSchema.GetAttr ( iMapFrom ).m_tLocator.m_bDynamic );
tNewMatch.m_pStatic = tMatch.m_pStatic;
} else if ( iMapFrom>=0 )
{
if ( tDst.m_eAttrType==SPH_ATTR_FLOAT && tSrc.m_eAttrType==SPH_ATTR_BOOL )
{
tNewMatch.SetAttrFloat ( tDst.m_tLocator, ( tMatch.GetAttr ( tSrc.m_tLocator )>0 ? 1.0f : 0.0f ) );
} else
{
tNewMatch.SetAttr ( tDst.m_tLocator, tMatch.GetAttr ( tSrc.m_tLocator ) );
}
}
}
// swap out old (most likely wrong sized) match
Swap ( tMatch, tNewMatch );
CSphSchemaHelper::FreeDataSpecial ( tNewMatch, dRowItems );
}
}
}
bool GetIndexSchemaItems ( const ISphSchema & tSchema, const CSphVector<CSphQueryItem> & dItems, CSphVector<int> & dAttrs )
{
bool bHaveAsterisk = false;
for ( const auto & i : dItems )
{
if ( i.m_sAlias.cstr() )
{
int iAttr = tSchema.GetAttrIndex ( i.m_sAlias.cstr() );
if ( iAttr>=0 )
dAttrs.Add(iAttr);
}
bHaveAsterisk |= i.m_sExpr=="*";
}
dAttrs.Sort();
return bHaveAsterisk;
}
bool GetItemsLeftInSchema ( const ISphSchema & tSchema, bool bOnlyPlain, const CSphVector<int> & dAttrs, CSphVector<int> & dAttrsInSchema )
{
bool bHaveExprs = false;
for ( int i = 0, iAttrsCount = tSchema.GetAttrsCount (); i<iAttrsCount; ++i )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( tAttr.m_pExpr )
{
bHaveExprs = true;
// need to keep post-limit expression (stored field) for multi-query / facet
// also keep columnar attributes (with expressions)
if ( bOnlyPlain && !tAttr.m_pExpr->IsColumnar() && tAttr.m_eStage!=SPH_EVAL_POSTLIMIT )
continue;
}
if ( !IsGroupbyMagic ( tAttr.m_sName ) && !IsSortStringInternal ( tAttr.m_sName ) && !dAttrs.BinarySearch(i) )
dAttrsInSchema.Add(i);
}
return bHaveExprs;
}
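// ordering used when expanding '*': the id column goes first, then the blob locator,
// then full-text fields in their schema order, then the remaining attrs by their original index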
struct AttrSort_fn
{
const ISphSchema & m_tSchema;
AttrSort_fn ( const ISphSchema & tSchema )
: m_tSchema ( tSchema )
{}
bool IsLess ( int iA, int iB ) const
{
const auto & sNameA = m_tSchema.GetAttr(iA).m_sName;
const auto & sNameB = m_tSchema.GetAttr(iB).m_sName;
bool bDocIdA = sNameA==sphGetDocidName();
bool bDocIdB = sNameB==sphGetDocidName();
if ( bDocIdA || bDocIdB )
return bDocIdA || !bDocIdB;
bool bBlobLocA = sNameA==sphGetBlobLocatorName();
bool bBlobLocB = sNameB==sphGetBlobLocatorName();
if ( bBlobLocA ||bBlobLocB )
return bBlobLocA || !bBlobLocB;
bool bFieldA = !!m_tSchema.GetField ( sNameA.cstr() );
bool bFieldB = !!m_tSchema.GetField ( sNameB.cstr() );
if ( bFieldA || bFieldB )
{
if ( bFieldA && bFieldB )
{
int iFieldIdA = m_tSchema.GetFieldIndex ( sNameA.cstr() );
int iFieldIdB = m_tSchema.GetFieldIndex ( sNameB.cstr() );
return iFieldIdA < iFieldIdB;
}
return bFieldA || !bFieldB;
}
int iIndexA = m_tSchema.GetAttr(iA).m_iIndex;
int iIndexB = m_tSchema.GetAttr(iB).m_iIndex;
if ( iIndexA == -1 && iIndexB == -1 )
return iA < iB;
return iIndexA != -1 && ( iIndexB == -1 || iIndexA < iIndexB );
}
};
void DoExpansion ( const ISphSchema & tSchema, const CSphVector<int> & dAttrsInSchema, const CSphVector<CSphQueryItem> & dItems, CSphVector<CSphQueryItem> & dExpanded )
{
bool bExpandedAsterisk = false;
for ( const auto & i : dItems )
{
if ( i.m_sExpr=="*" )
{
if ( bExpandedAsterisk )
continue;
bExpandedAsterisk = true;
IntVec_t dSortedAttrsInSchema = dAttrsInSchema;
dSortedAttrsInSchema.Sort ( AttrSort_fn(tSchema) );
for ( auto iAttr : dSortedAttrsInSchema )
{
const CSphColumnInfo & tCol = tSchema.GetAttr(iAttr);
CSphQueryItem & tExpanded = dExpanded.Add();
tExpanded.m_sExpr = tCol.m_sName;
if ( tCol.m_pExpr ) // stored fields
tExpanded.m_sAlias = tCol.m_sName;
}
}
else
dExpanded.Add(i);
}
}
// rebuild the result item list, expanding stars
const CSphVector<CSphQueryItem> & ExpandAsterisk ( const ISphSchema & tSchema, const CSphVector<CSphQueryItem> & dItems, CSphVector<CSphQueryItem> & tExpanded, bool bOnlyPlain, bool & bHaveExprs )
{
// the result schema usually is the index schema + calculated items + @-items
// we need to extract the index schema only
CSphVector<int> dIndexSchemaAttrs;
bool bHaveAsterisk = GetIndexSchemaItems ( tSchema, dItems, dIndexSchemaAttrs );
// no stars? Nothing to do.
if ( !bHaveAsterisk )
return dItems;
// find items that are in index schema but not in our requested item list
// but do not include @-items
CSphVector<int> dAttrsLeftInSchema;
bHaveExprs = GetItemsLeftInSchema ( tSchema, bOnlyPlain, dIndexSchemaAttrs, dAttrsLeftInSchema );
DoExpansion ( tSchema, dAttrsLeftInSchema, dItems, tExpanded );
return tExpanded;
}
// in MatchIterator_c we need matches sorted ascending by DocID.
// also we don't want to sort the matches themselves; a sorted vec of indexes is quite enough.
// also we want to avoid allocating a vec for the matches as it may be huge.
// There are several possible solutions to have a vec of indexes:
// 1. Use match tags, as they're not used in this part of the code. With intensive use it is however not good in
// terms of cache misses (i.e. the 'min' match is match[N] where N is match[0].tag, then match[M] where M is match[1].tag,
// so each access makes a nearly random jump).
// 2. Use the space between the last match and the end of the vector (assuming reserved space > used space). If there is enough space,
// we can use it either as a vec of WORDs, or as a vec of DWORDs, depending on the number of matches. The first case needs at most 128K
// of RAM; the second needs more, but that RAM is compact.
// So, let's try the tail space first, but if it is not available (no, or not enough, space), use the tags.
// That is, sort the tags inside the matches without moving the rest of them.
class MatchTagSortAccessor_c
{
const VecTraits_T<CSphMatch> & m_dTagOrder;
public:
explicit MatchTagSortAccessor_c ( const VecTraits_T<CSphMatch> & dTagOrder) : m_dTagOrder ( dTagOrder ) {}
using T = CSphMatch;
using MEDIAN_TYPE = int;
static MEDIAN_TYPE Key ( T * a ) { return a->m_iTag; }
static void Swap ( T * a, T * b ) { ::Swap ( a->m_iTag, b->m_iTag ); }
static T * Add ( T * p, int i ) { return p+i; }
static int Sub ( T * b, T * a ) { return (int)(b-a); }
static void CopyKey ( MEDIAN_TYPE * pMed, CSphMatch * pVal ) { *pMed = Key ( pVal ); }
bool IsLess ( int a, int b ) const
{
return sphGetDocID ( m_dTagOrder[a].m_pDynamic )<sphGetDocID ( m_dTagOrder[b].m_pDynamic );
}
};
class MatchIterator_c
{
int m_iRawIdx; // raw iteration index (internal)
int m_iLimit;
std::function<int(int)> m_fnOrder; // used to access matches in ascending docid order
bool m_bTailClean = false;
// use space after end of matches to store indexes, WORD per match
bool MaybeUseWordOrder ( const CSphSwapVector<CSphMatch>& dMatches ) const
{
if ( dMatches.GetLength()>0x10000 )
return false;
int64_t iTail = dMatches.AllocatedBytes ()-dMatches.GetLengthBytes64 ();
if ( iTail<(int64_t) ( dMatches.GetLength () * sizeof ( WORD ) ) )
return false;
// will use the tail of the vec as a blob of WORDs
VecTraits_T<WORD> dOrder = { (WORD *) dMatches.end (), m_iLimit };
ARRAY_CONSTFOREACH( i, dOrder )
dOrder[i] = i;
dOrder.Sort ( Lesser ( [&dMatches] ( WORD a, WORD b ) {
return sphGetDocID ( dMatches[a].m_pDynamic )<sphGetDocID ( dMatches[b].m_pDynamic );
} ) );
return true;
}
// use space after end of matches to store indexes, DWORD per match
bool MaybeUseDwordOrder ( const CSphSwapVector<CSphMatch>& dMatches ) const
{
if ( dMatches.GetLength64()>0x100000000 )
return false;
int64_t iTail = dMatches.AllocatedBytes ()-dMatches.GetLengthBytes64 ();
if ( iTail<(int64_t) ( dMatches.GetLength () * sizeof ( DWORD ) ) )
return false;
// will use the tail of the vec as a blob of DWORDs
VecTraits_T<DWORD> dOrder = { (DWORD *) dMatches.end (), m_iLimit };
for( DWORD i=0, uLen=dOrder.GetLength(); i<uLen; ++i )
dOrder[i] = i;
dOrder.Sort ( Lesser ( [&dMatches] ( DWORD a, DWORD b ) {
return sphGetDocID ( dMatches[a].m_pDynamic )<sphGetDocID ( dMatches[b].m_pDynamic );
} ) );
return true;
}
// use tags to store indexes. No extra space, but random access order, many cache misses expected
void UseTags ( VecTraits_T<CSphMatch> & dOrder )
{
ARRAY_CONSTFOREACH( i, dOrder )
dOrder[i].m_iTag = i;
MatchTagSortAccessor_c tOrder ( dOrder );
sphSort ( dOrder.Begin (), dOrder.GetLength (), tOrder, tOrder );
m_bTailClean = true;
}
public:
OneResultset_t& m_tResult;
DocID_t m_tDocID;
int m_iIdx; // ordering index (each step gives matches in sorted by Docid order)
explicit MatchIterator_c ( OneResultset_t & tResult )
: m_tResult ( tResult )
{
auto& dMatches = tResult.m_dMatches;
m_iLimit = dMatches.GetLength();
if ( MaybeUseWordOrder ( dMatches ) )
m_fnOrder = [pData = (WORD *) m_tResult.m_dMatches.end ()] ( int i ) { return pData[i]; };
else if ( MaybeUseDwordOrder ( dMatches ) )
m_fnOrder = [pData = (DWORD *) m_tResult.m_dMatches.end ()] ( int i ) { return pData[i]; };
else
{
UseTags ( dMatches );
m_fnOrder = [this] ( int i ) { return m_tResult.m_dMatches[i].m_iTag; };
}
m_iRawIdx = 0;
m_iIdx = m_fnOrder(0);
assert ( m_tResult.m_tSchema.GetAttr ( sphGetDocidName() ) );
m_tDocID = sphGetDocID ( m_tResult.m_dMatches[m_iIdx].m_pDynamic );
}
~MatchIterator_c()
{
if ( m_bTailClean )
return;
// need to reset state of some tail matches in order to avoid issues when deleting the vec of them
// (since we used that memory region for own purposes)
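// the order blob occupied m_iLimit WORD (or DWORD) slots right past the end of the vec;
// convert that byte count to the number of trailing CSphMatch slots it overlapped (rounded up)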
int iDirtyMatches = m_iLimit>0x10000 ? m_iLimit * sizeof ( DWORD ) : m_iLimit * sizeof ( WORD );
iDirtyMatches = ( iDirtyMatches+sizeof ( CSphMatch )-1 ) / sizeof ( CSphMatch );
for ( int i = 0; i<iDirtyMatches; ++i )
( m_tResult.m_dMatches.end ()+i )->CleanGarbage();
}
inline bool Step()
{
++m_iRawIdx;
if ( m_iRawIdx>=m_iLimit )
return false;
m_iIdx = m_fnOrder ( m_iRawIdx );
m_tDocID = sphGetDocID ( m_tResult.m_dMatches[m_iIdx].m_pDynamic );
return true;
}
static inline bool IsLess ( MatchIterator_c *a, MatchIterator_c *b )
{
if ( a->m_tDocID!=b->m_tDocID )
return a->m_tDocID<b->m_tDocID;
// that means local matches are always preferred over remote ones, but it seems that is not necessary
// if ( !a->m_dResult.m_bTag && b->m_dResult.m_bTag )
// return true;
return a->m_tResult.m_iTag>b->m_tResult.m_iTag;
}
};
int KillPlainDupes ( ISphMatchSorter * pSorter, AggrResult_t & tRes )
{
int iDupes = 0;
auto& dResults = tRes.m_dResults;
// a normal sorter needs massaging:
// queue by docid, then ascending by tag, to guarantee the replacement order
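// classic k-way merge: one iterator per non-empty result set, each walking its matches
// in ascending docid order; the queue always exposes the smallest docid, and among equal
// docids the iterator with the higher tag wins (see MatchIterator_c::IsLess)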
RawVector_T <MatchIterator_c> dIterators;
dIterators.Reserve_static ( dResults.GetLength () );
CSphQueue<MatchIterator_c *, MatchIterator_c> qMatches ( dResults.GetLength () );
for ( auto & tResult : dResults )
if ( !tResult.m_dMatches.IsEmpty() )
{
dIterators.Emplace_back(tResult);
qMatches.Push ( &dIterators.Last() );
}
DocID_t tPrevDocID = DOCID_MIN;
while ( qMatches.GetLength() )
{
auto * pMin = qMatches.Root();
DocID_t tDocID = pMin->m_tDocID;
if ( tDocID!=tPrevDocID ) // by default, simply remove dupes (select first by tag)
{
CSphMatch & tMatch = pMin->m_tResult.m_dMatches[pMin->m_iIdx];
auto iTag = tMatch.m_iTag; // as we may use tag for ordering
if ( !pMin->m_tResult.m_bTagsAssigned )
tMatch.m_iTag = pMin->m_tResult.m_iTag; // that will link us back to docstore
pSorter->Push ( tMatch );
tMatch.m_iTag = iTag; // restore tag
tPrevDocID = tDocID;
}
else
++iDupes;
qMatches.Pop ();
if ( pMin->Step() )
qMatches.Push ( pMin );
}
tRes.m_bTagsAssigned = true;
return iDupes;
}
int KillGroupbyDupes ( ISphMatchSorter * pSorter, AggrResult_t & tRes, const VecTraits_T<int>& dOrd )
{
int iDupes = 0;
pSorter->SetBlobPool ( nullptr );
for ( int iOrd : dOrd )
{
auto & tResult = tRes.m_dResults[iOrd];
ARRAY_CONSTFOREACH( i, tResult.m_dMatches )
{
CSphMatch & tMatch = tResult.m_dMatches[i];
if ( !tResult.m_bTagsAssigned )
tMatch.m_iTag = tResult.m_iTag; // that will link us back to docstore
if ( !pSorter->PushGrouped ( tMatch, i==0 ) ) // groupby sorter does that automagically
++iDupes;
}
}
tRes.m_bTagsAssigned = true;
return iDupes;
}
// rearrange results so that they're placed in ascending tag order
// dOrd contains indexes to access the results in descending tag order
void SortTagsAndDocstores ( AggrResult_t & tRes, const VecTraits_T<int>& dOrd )
{
auto iTags = dOrd.GetLength ();
CSphFixedVector<DocstoreAndTag_t> dTmp { iTags };
auto & dResults = tRes.m_dResults;
for ( int i=0; i<iTags; ++i )
dTmp[iTags-i-1].Assign ( dResults[dOrd[i]] );
for ( int i = 0; i<iTags; ++i )
dResults[i].Assign ( dTmp[i] );
Debug ( tRes.m_bIdxByTag = true; )
}
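// dedupe matches from all result sets through a freshly created sorter and flatten them
// into a single chunk (m_dResults.First()); returns the number of duplicates removed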
int KillDupesAndFlatten ( ISphMatchSorter * pSorter, AggrResult_t & tRes )
{
assert ( pSorter );
int iTags = tRes.m_dResults.GetLength();
CSphFixedVector<int> dOrd ( iTags );
ARRAY_CONSTFOREACH( i, dOrd )
dOrd[i] = i;
// sort resultsets in descending tag order
dOrd.Sort ( Lesser ( [&tRes] ( int l, int r ) { return tRes.m_dResults[r].m_iTag<tRes.m_dResults[l].m_iTag; } ) );
// remap to compact (non-fragmented) range of tags
for ( int iRes : dOrd )
tRes.m_dResults[iRes].m_iTag = --iTags;
Debug ( tRes.m_bTagsCompacted = true );
// do actual deduplication
int iDup = pSorter->IsGroupby() ? KillGroupbyDupes ( pSorter, tRes, dOrd ) : KillPlainDupes ( pSorter, tRes );
// ALL matches have the same schema here, since KillDupesAndFlatten() is called after RemapResult(), or the schemas were already identical.
for ( auto& dResult : tRes.m_dResults )
{
for ( auto& dMatch : dResult.m_dMatches )
tRes.m_tSchema.FreeDataPtrs ( dMatch );
dResult.m_dMatches.Reset();
}
// don't issue tRes.m_dResults.reset since each result still has a docstore by tag
// flatten all results into single chunk
auto & tFinalMatches = tRes.m_dResults.First ();
tFinalMatches.FillFromSorter ( pSorter );
Debug ( tRes.m_bSingle = true; )
Debug ( tRes.m_bOneSchema = true; )
// now all matches are properly tagged and located in tRes.m_dResults.First()
// each tRes.m_dResults entry has a proper tag and the corresponding docstore pointer, in random order
// and we have dOrd which enumerates them in descending tag order
SortTagsAndDocstores ( tRes, dOrd );
return iDup;
}
void RecoverAggregateFunctions ( const CSphQuery & tQuery, const AggrResult_t & tRes )
{
for ( const auto& tItem : tQuery.m_dItems )
{
if ( tItem.m_eAggrFunc==SPH_AGGR_NONE )
continue;
for ( int j = 0, iAttrsCount = tRes.m_tSchema.GetAttrsCount (); j<iAttrsCount; ++j )
{
auto & tCol = const_cast<CSphColumnInfo&> ( tRes.m_tSchema.GetAttr(j) );
if ( tCol.m_sName==tItem.m_sAlias )
{
assert ( tCol.m_eAggrFunc==SPH_AGGR_NONE );
tCol.m_eAggrFunc = tItem.m_eAggrFunc;
}
}
}
}
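// generic comparator driven by a parsed sort clause; bit i of m_uAttrDesc flips
// the direction of keypart i, and rowid is the final tie-breaker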
struct GenericMatchSort_fn : public CSphMatchComparatorState
{
bool IsLess ( const CSphMatch * a, const CSphMatch * b ) const
{
for ( int i=0; i<CSphMatchComparatorState::MAX_ATTRS; i++ )
switch ( m_eKeypart[i] )
{
case SPH_KEYPART_ROWID:
if ( a->m_tRowID==b->m_tRowID )
continue;
return ( ( m_uAttrDesc>>i ) & 1 ) ^ ( a->m_tRowID < b->m_tRowID );
case SPH_KEYPART_WEIGHT:
if ( a->m_iWeight==b->m_iWeight )
continue;
return ( ( m_uAttrDesc>>i ) & 1 ) ^ ( a->m_iWeight < b->m_iWeight );
case SPH_KEYPART_INT:
{
SphAttr_t aa = a->GetAttr ( m_tLocator[i] );
SphAttr_t bb = b->GetAttr ( m_tLocator[i] );
if ( aa==bb )
continue;
return ( ( m_uAttrDesc>>i ) & 1 ) ^ ( aa < bb );
}
case SPH_KEYPART_FLOAT:
{
float aa = a->GetAttrFloat ( m_tLocator[i] );
float bb = b->GetAttrFloat ( m_tLocator[i] );
if ( aa==bb )
continue;
return ( ( m_uAttrDesc>>i ) & 1 ) ^ ( aa < bb );
}
case SPH_KEYPART_DOUBLE:
{
double aa = a->GetAttrDouble ( m_tLocator[i] );
double bb = b->GetAttrDouble ( m_tLocator[i] );
if ( aa==bb )
continue;
return ( ( m_uAttrDesc>>i ) & 1 ) ^ ( aa < bb );
}
case SPH_KEYPART_STRINGPTR:
case SPH_KEYPART_STRING:
{
int iCmp = CmpStrings ( *a, *b, i );
if ( iCmp!=0 )
return ( ( m_uAttrDesc>>i ) & 1 ) ^ ( iCmp < 0 );
break;
}
}
return a->m_tRowID<b->m_tRowID;
}
};
void ExtractPostlimit ( const ISphSchema & tSchema, bool bMaster, CSphVector<const CSphColumnInfo *> & dPostlimit )
{
for ( int i=0; i<tSchema.GetAttrsCount(); ++i )
{
const CSphColumnInfo & tCol = tSchema.GetAttr ( i );
if ( tCol.m_eStage==SPH_EVAL_POSTLIMIT && ( bMaster || tCol.m_uFieldFlags==CSphColumnInfo::FIELD_NONE ) )
dPostlimit.Add ( &tCol );
}
}
// for a single chunk of matches, return the list of tags that have docstores
CSphVector<int> GetUniqueTagsWithDocstores ( const AggrResult_t & tRes, int iOff, int iLim )
{
assert ( tRes.m_bTagsCompacted );
assert ( tRes.m_bSingle );
CSphVector<bool> dBoolTags;
dBoolTags.Resize ( tRes.m_dResults.GetLength() );
dBoolTags.ZeroVec();
auto dMatches = tRes.m_dResults.First ().m_dMatches.Slice ( iOff, iLim );
for ( const auto& dMatch : dMatches )
{
assert ( dMatch.m_iTag < tRes.m_dResults.GetLength() );
if ( tRes.m_dResults[dMatch.m_iTag].Docstore() )
dBoolTags[dMatch.m_iTag] = true;
}
CSphVector<int> dTags;
ARRAY_CONSTFOREACH( i, dBoolTags )
if ( dBoolTags[i] )
dTags.Add(i);
return dTags;
}
void SetupPostlimitExprs ( const DocstoreReader_i * pDocstore, const CSphColumnInfo * pCol, const char * sQuery, int64_t iDocstoreSessionId )
{
DocstoreSession_c::InfoDocID_t tSessionInfo;
tSessionInfo.m_pDocstore = pDocstore;
tSessionInfo.m_iSessionId = iDocstoreSessionId;
assert ( pCol && pCol->m_pExpr );
pCol->m_pExpr->Command ( SPH_EXPR_SET_DOCSTORE_DOCID, &tSessionInfo ); // value is copied; no leak of pointer to local here.
pCol->m_pExpr->Command ( SPH_EXPR_SET_QUERY, (void *)sQuery);
}
void EvalPostlimitExprs ( CSphMatch & tMatch, const CSphColumnInfo * pCol )
{
assert ( pCol && pCol->m_pExpr );
switch ( pCol->m_eAttrType )
{
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
tMatch.SetAttr ( pCol->m_tLocator, pCol->m_pExpr->IntEval ( tMatch ) );
break;
case SPH_ATTR_BIGINT:
tMatch.SetAttr ( pCol->m_tLocator, pCol->m_pExpr->Int64Eval ( tMatch ) );
break;
case SPH_ATTR_STRINGPTR:
// FIXME! a potential leak of *previous* value?
tMatch.SetAttr ( pCol->m_tLocator, (SphAttr_t) pCol->m_pExpr->StringEvalPacked ( tMatch ) );
break;
default:
tMatch.SetAttrFloat ( pCol->m_tLocator, pCol->m_pExpr->Eval ( tMatch ) );
break;
}
}
// single resultset chunk, but with many tags
void ProcessMultiPostlimit ( AggrResult_t & tRes, VecTraits_T<const CSphColumnInfo *> & dPostlimit, const char * sQuery, int iOff, int iLim )
{
if ( dPostlimit.IsEmpty() )
return;
assert ( tRes.m_bSingle );
assert ( tRes.m_bOneSchema );
assert ( tRes.m_bTagsAssigned );
assert ( tRes.m_bTagsCompacted );
assert ( tRes.m_bIdxByTag );
// collect unique tags from matches
CSphVector<int> dDocstoreTags = GetUniqueTagsWithDocstores ( tRes, iOff, iLim );
// generates docstore session id
DocstoreSession_c tSession;
auto iSessionUID = tSession.GetUID();
// spawn buffered readers for the current session
// put them to a global hash
for ( int iTag : dDocstoreTags )
tRes.m_dResults[iTag].m_pDocstore->CreateReader ( iSessionUID );
int iLastTag = -1;
auto dMatches = tRes.m_dResults.First ().m_dMatches.Slice ( iOff, iLim );
for ( auto & dMatch : dMatches )
{
int iTag = dMatch.m_iTag;
if ( tRes.m_dResults[iTag].m_bTag )
continue; // remote match; everything should be precalculated
auto * pDocstore = tRes.m_dResults[iTag].Docstore ();
assert ( iTag<tRes.m_dResults.GetLength () );
if ( iTag!=iLastTag )
{
for ( const auto & pCol : dPostlimit )
SetupPostlimitExprs ( pDocstore, pCol, sQuery, iSessionUID );
iLastTag = iTag;
}
for ( const auto & pCol : dPostlimit )
EvalPostlimitExprs ( dMatch, pCol );
}
}
void ProcessSinglePostlimit ( OneResultset_t & tRes, VecTraits_T<const CSphColumnInfo *> & dPostlimit, const char * sQuery, int iOff, int iLim )
{
auto dMatches = tRes.m_dMatches.Slice ( iOff, iLim );
if ( dMatches.IsEmpty() )
return;
// generates docstore session id
DocstoreSession_c tSession;
auto iSessionUID = tSession.GetUID();
// spawn buffered reader for the current session
// put it to a global hash
if ( tRes.Docstore () )
tRes.m_pDocstore->CreateReader ( iSessionUID );
for ( const auto & pCol : dPostlimit )
SetupPostlimitExprs ( tRes.Docstore (), pCol, sQuery, iSessionUID );
for ( auto & tMatch : dMatches )
for ( const auto & pCol : dPostlimit )
EvalPostlimitExprs ( tMatch, pCol );
}
void ProcessLocalPostlimit ( AggrResult_t & tRes, const CSphQuery & tQuery, bool bMaster )
{
assert ( !tRes.m_bOneSchema );
assert ( !tRes.m_bSingle );
bool bGotPostlimit = false;
for ( int i = 0, iAttrsCount = tRes.m_tSchema.GetAttrsCount (); i<iAttrsCount && !bGotPostlimit; ++i )
{
const CSphColumnInfo & tCol = tRes.m_tSchema.GetAttr(i);
bGotPostlimit = ( tCol.m_eStage==SPH_EVAL_POSTLIMIT && ( bMaster || tCol.m_uFieldFlags==CSphColumnInfo::FIELD_NONE ) );
}
if ( !bGotPostlimit )
return;
int iLimit = ( tQuery.m_iOuterLimit ? tQuery.m_iOuterLimit : tQuery.m_iLimit );
iLimit += Max ( tQuery.m_iOffset, tQuery.m_iOuterOffset );
CSphVector<const CSphColumnInfo *> dPostlimit;
for ( auto & tResult : tRes.m_dResults )
{
dPostlimit.Resize ( 0 );
ExtractPostlimit ( tResult.m_tSchema, bMaster, dPostlimit );
if ( dPostlimit.IsEmpty () )
continue;
iLimit = ( tQuery.m_iOuterLimit ? tQuery.m_iOuterLimit : tQuery.m_iLimit );
// we can't estimate limit.offset per result set
// as the matches get merged and sorted at the next step
if ( !tResult.m_bTag )
ProcessSinglePostlimit ( tResult, dPostlimit, tQuery.m_sQuery.cstr (), 0, iLimit );
}
}
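// intersect the schemas of all non-empty result sets into tRes.m_tSchema;
// returns true if all of them were equal to begin with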
bool MinimizeSchemas ( AggrResult_t & tRes )
{
bool bAllEqual = true;
bool bSchemaBaseSet = false;
auto iResults = tRes.m_dResults.GetLength();
for ( int i=0; i<iResults; ++i )
{
// skip empty result set
if ( !tRes.m_dResults[i].m_dMatches.GetLength() )
continue;
// set base schema only from non-empty result set
if ( !bSchemaBaseSet )
{
bSchemaBaseSet = true;
tRes.m_tSchema = tRes.m_dResults[i].m_tSchema;
continue;
}
if ( !MinimizeSchema ( tRes.m_tSchema, tRes.m_dResults[i].m_tSchema ) )
bAllEqual = false;
}
// still want to set base schema from one of the result set
if ( !bSchemaBaseSet && bAllEqual && tRes.m_dResults.GetLength() )
tRes.m_tSchema = tRes.m_dResults[0].m_tSchema;
return bAllEqual;
}
//////////////////////////////////////////////////////////////////////////
bool MergeAllMatches ( AggrResult_t & tRes, const CSphQuery & tQuery, bool bHaveLocals, bool bAllEqual, bool bMaster, const CSphFilterSettings * pAggrFilter, QueryProfile_c * pProfiler )
{
ESphSortOrder eQuerySort = ( tQuery.m_sOuterOrderBy.IsEmpty() ? SPH_SORT_RELEVANCE : SPH_SORT_EXTENDED );
CSphQuery tQueryCopy = tQuery;
// got outer order? gotta do a couple things
if ( tQueryCopy.m_bHasOuter )
{
// first, temporarily patch up sorting clause and max_matches (we will restore them later)
Swap ( tQueryCopy.m_sOuterOrderBy, tQueryCopy.m_sGroupBy.IsEmpty() ? tQueryCopy.m_sSortBy : tQueryCopy.m_sGroupSortBy );
Swap ( eQuerySort, tQueryCopy.m_eSort );
// second, apply inner limit now, before (!) reordering
for ( auto & tResult : tRes.m_dResults )
tResult.ClampMatches ( tQueryCopy.m_iLimit );
}
// so we need to bring matches to the schema that the *sorter* wants
// so we need to create the sorter before conversion
//
// create queue
// at this point, we do not need to compute anything; it all must be here
SphQueueSettings_t tQueueSettings ( tRes.m_tSchema );
tQueueSettings.m_pAggrFilter = pAggrFilter;
// FIXME? probably not right; 20 shards with 300 matches each might be too much
// but propagating too small inner max_matches to the outer is not right either
if ( tQueryCopy.m_bHasOuter )
tQueueSettings.m_iMaxMatches = Min ( tQuery.m_iMaxMatches * tRes.m_dResults.GetLength(), tRes.GetLength() );
else
tQueueSettings.m_iMaxMatches = Min ( tQuery.m_iMaxMatches, tRes.GetLength() );
tQueueSettings.m_iMaxMatches = Max ( tQueueSettings.m_iMaxMatches, 1 );
tQueueSettings.m_bGrouped = true;
SphQueueRes_t tQueueRes;
std::unique_ptr<ISphMatchSorter> pSorter ( sphCreateQueue ( tQueueSettings, tQueryCopy, tRes.m_sError, tQueueRes ) );
// restore outer order related patches, or it screws up the query log
if ( tQueryCopy.m_bHasOuter )
{
Swap ( tQueryCopy.m_sOuterOrderBy, tQueryCopy.m_sGroupBy.IsEmpty() ? tQueryCopy.m_sSortBy : tQueryCopy.m_sGroupSortBy );
Swap ( eQuerySort, tQueryCopy.m_eSort );
}
if ( !pSorter )
return false;
pSorter->SetMerge(true);
// reset bAllEqual flag if sorter makes new attributes
if ( bAllEqual )
{
// first we count the already existing internal attributes
// then check if sorter makes more
int iRemapCount = GetStringRemapCount ( tRes.m_tSchema, tRes.m_tSchema );
int iNewCount = GetStringRemapCount ( *pSorter->GetSchema(), tRes.m_tSchema );
bAllEqual = ( iNewCount<=iRemapCount );
}
// sorter expects this
// just doing tRes.m_tSchema = *pSorter->GetSchema() won't work here
// because pSorter->GetSchema() may already contain a pointer to tRes.m_tSchema as m_pIndexSchema
// that's why we explicitly copy a CSphRsetSchema to a plain CSphSchema and move it to tRes.m_tSchema
{
CSphSchema tSchemaCopy;
tSchemaCopy = *pSorter->GetSchema();
tRes.m_tSchema.Swap ( tSchemaCopy );
}
// convert all matches to sorter schema - at least to manage all static to dynamic
if ( !bAllEqual )
{
// post-limit stuff first
if ( bHaveLocals )
{
CSphScopedProfile tProf ( pProfiler, SPH_QSTATE_EVAL_POST );
ProcessLocalPostlimit ( tRes, tQueryCopy, bMaster );
}
RemapResult ( tRes );
}
// do the sort work!
tRes.m_iTotalMatches -= KillDupesAndFlatten ( pSorter.get(), tRes );
return true;
}
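// re-sort the single flattened chunk of matches according to the outer ORDER BY clause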
bool ApplyOuterOrder ( AggrResult_t & tRes, const CSphQuery & tQuery )
{
assert ( !tRes.m_dResults.IsEmpty() );
// reorder (aka outer order)
ESphSortFunc eFunc;
GenericMatchSort_fn tReorder;
tReorder.m_fnStrCmp = GetStringCmpFunc ( tQuery.m_eCollation );
CSphVector<ExtraSortExpr_t> dExtraExprs;
ESortClauseParseResult eRes = sphParseSortClause ( tQuery, tQuery.m_sOuterOrderBy.cstr(), tRes.m_tSchema, eFunc, tReorder, dExtraExprs, true, nullptr, tRes.m_sError );
if ( eRes==SORT_CLAUSE_RANDOM )
tRes.m_sError = "order by rand() not supported in outer select";
if ( eRes!=SORT_CLAUSE_OK )
return false;
assert ( eFunc==FUNC_GENERIC1 ||eFunc==FUNC_GENERIC2 || eFunc==FUNC_GENERIC3 || eFunc==FUNC_GENERIC4 || eFunc==FUNC_GENERIC5 );
auto& dMatches = tRes.m_dResults.First().m_dMatches;
sphSort ( dMatches.Begin(), dMatches.GetLength(), tReorder, MatchSortAccessor_t() );
return true;
}
void ComputePostlimit ( AggrResult_t & tRes, const CSphQuery & tQuery, bool bMaster )
{
assert ( tRes.m_bSingle );
assert ( tRes.m_bOneSchema );
assert ( !tRes.m_dResults.IsEmpty () );
CSphVector<const CSphColumnInfo *> dPostlimit;
ExtractPostlimit ( tRes.m_tSchema, bMaster, dPostlimit );
// post-compute matches only between offset..limit
// however, on an agent we can't estimate limit.offset of the master's merged result set,
// and the master doesn't provide the offset to agents, only offset+limit as the limit,
// so we compute all matches from 0 up to inner.limit/outer.limit
assert ( tRes.GetLength ()==tRes.m_dResults.First().m_dMatches.GetLength() );
int iOff = Max ( tQuery.m_iOffset, tQuery.m_iOuterOffset );
int iLimit = ( tQuery.m_iOuterLimit ? tQuery.m_iOuterLimit : tQuery.m_iLimit );
if ( tRes.m_bTagsAssigned )
ProcessMultiPostlimit ( tRes, dPostlimit, tQuery.m_sQuery.cstr (), iOff, iLimit );
else
ProcessSinglePostlimit ( tRes.m_dResults.First(), dPostlimit, tQuery.m_sQuery.cstr(), iOff, iLimit );
}
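// predictor cost constants are per-event costs in nanoseconds; the weighted sum
// over skips/docs/hits/matches is converted to milliseconds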
int64_t CalcPredictedTimeMsec ( const CSphQueryResultMeta & tMeta )
{
assert ( tMeta.m_bHasPrediction );
int64_t iNanoResult = int64_t(g_iPredictorCostSkip)* tMeta.m_tStats.m_iSkips
+ g_iPredictorCostDoc * tMeta.m_tStats.m_iFetchedDocs
+ g_iPredictorCostHit * tMeta.m_tStats.m_iFetchedHits
+ g_iPredictorCostMatch * tMeta.m_iTotalMatches;
return iNanoResult/1000000;
}
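// clamp an oversized max_matches to the table's total document count
// (but never below DEFAULT_MAX_MATCHES, and never above the requested value)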
int GetMaxMatches ( int iQueryMaxMatches, const CSphIndex * pIndex )
{
if ( iQueryMaxMatches<=DEFAULT_MAX_MATCHES )
return iQueryMaxMatches;
int64_t iDocs = Min ( (int)INT_MAX, pIndex->GetStats().m_iTotalDocuments ); // clamp to int max
return Min ( iQueryMaxMatches, Max ( iDocs, DEFAULT_MAX_MATCHES ) ); // we want neither a zero-sized sorter nor one longer than query.max_matches
}
} // namespace static
/// merges multiple result sets, remaps columns, does reorder for outer selects
bool MinimizeAggrResult ( AggrResult_t & tRes, const CSphQuery & tQuery, bool bHaveLocals, const sph::StringSet & hExtraColumns, QueryProfile_c * pProfiler, const CSphFilterSettings * pAggrFilter, bool bForceRefItems, bool bMaster )
{
bool bReturnZeroCount = !tRes.m_dZeroCount.IsEmpty();
bool bQueryFromAPI = tQuery.m_eQueryType==QUERY_API;
// 0 matches via SphinxAPI? no fiddling with schemes is necessary
// (and via SphinxQL, we still need to return the right schema)
// 0 result set schemes via SphinxQL? just bail
if ( tRes.IsEmpty() && ( bQueryFromAPI || !bReturnZeroCount ) )
{
Debug ( tRes.m_bSingle = true; )
if ( !tRes.m_dResults.IsEmpty () )
{
tRes.m_tSchema = tRes.m_dResults.First ().m_tSchema;
Debug( tRes.m_bOneSchema = true; )
}
return true;
}
Debug ( tRes.m_bSingle = tRes.m_dResults.GetLength ()==1; )
// build a minimal schema over all the (potentially different) schemes
// that we have in our aggregated result set
assert ( tRes.m_dResults.GetLength() || bReturnZeroCount );
bool bAllEqual = MinimizeSchemas(tRes);
Debug ( tRes.m_bOneSchema = tRes.m_bSingle; )
const CSphVector<CSphQueryItem> & dQueryItems = ( tQuery.m_bFacet || tQuery.m_bFacetHead || bForceRefItems ) ? tQuery.m_dRefItems : tQuery.m_dItems;
// build a list of select items that the query asked for
bool bHaveExprs = false;
CSphVector<CSphQueryItem> tExtItems;
const CSphVector<CSphQueryItem> & dItems = ExpandAsterisk ( tRes.m_tSchema, dQueryItems, tExtItems, tQuery.m_bFacetHead, bHaveExprs );
// api + index without attributes + select * case
// can not skip aggregate filtering
if ( bQueryFromAPI && dItems.IsEmpty() && !pAggrFilter && !bHaveExprs )
{
tRes.ClampAllMatches();
return true;
}
// build the final schemas!
FrontendSchemaBuilder_c tFrontendBuilder ( tRes, tQuery, dItems, dQueryItems, hExtraColumns, bQueryFromAPI, bHaveLocals );
if ( !tFrontendBuilder.Build ( bMaster, tRes.m_sError ) )
return false;
// tricky bit
// in purely distributed case, all schemas are received from the wire, and miss aggregate functions info
// thus, we need to re-assign that info
if ( !bHaveLocals )
RecoverAggregateFunctions ( tQuery, tRes );
// if there's more than one result set,
// we now have to merge and order all the matches
// this is a good time to apply outer order clause, too
if ( tRes.m_iSuccesses>1 || pAggrFilter )
{
if ( !MergeAllMatches ( tRes, tQuery, bHaveLocals, bAllEqual, bMaster, pAggrFilter, pProfiler ) )
return false;
} else
{
tRes.m_dResults.First().m_iTag = 0;
Debug ( tRes.m_bTagsCompacted = true );
Debug ( tRes.m_bIdxByTag = true; )
}
// apply outer order clause to single result set
// (multiple combined sets just got reordered above)
// apply inner limit first
if ( tRes.m_iSuccesses==1 && tQuery.m_bHasOuter )
{
tRes.ClampMatches ( tQuery.m_iLimit );
if ( !tQuery.m_sOuterOrderBy.IsEmpty() )
{
if ( !ApplyOuterOrder ( tRes, tQuery ) )
return false;
}
Debug ( tRes.m_bSingle = true; )
Debug ( tRes.m_bTagsCompacted = true );
Debug ( tRes.m_bIdxByTag = true; )
}
if ( bAllEqual && bHaveLocals )
{
CSphScopedProfile tProf ( pProfiler, SPH_QSTATE_EVAL_POST );
ComputePostlimit ( tRes, tQuery, bMaster );
}
if ( bMaster )
{
CSphScopedProfile tProf ( pProfiler, SPH_QSTATE_EVAL_GETFIELD );
RemotesGetField ( tRes, tQuery );
}
// all the merging and sorting is now done
// replace the minimized matches schema with its subset, the result set schema
CSphSchema tOldSchema = tRes.m_tSchema;
tFrontendBuilder.PopulateSchema ( tRes.m_tSchema );
if ( tRes.m_iSuccesses==1 )
RemapNullMask ( tRes.m_dResults[0].m_dMatches, tOldSchema, tRes.m_tSchema );
return true;
}
/////////////////////////////////////////////////////////////////////////////
struct LocalIndex_t
{
CSphString m_sName;
CSphString m_sParentIndex;
int m_iOrderTag = 0;
int m_iWeight = 1;
int64_t m_iMass = 0;
};
struct QueryStat_t
{
uint64_t m_uQueryTime = 0;
uint64_t m_uFoundRows = 0;
int m_iSuccesses = 0;
};
struct StatsPerQuery_t
{
CSphVector<QueryStat_t> m_dStats;
};
struct DistrServedByAgent_t : StatsPerQuery_t
{
CSphString m_sIndex;
CSphVector<int> m_dAgentIds;
StrVec_t m_dLocalNames;
};
/// manage collection of indexes (to keep them alive)
/// Get(name) - returns an index from collection.
/// AddUniqIndex(name) - add local idx to collection, addref is implied by design
/// AddIndex(name,pidx) - add custom idx, to make it available with Get()
class KeepCollection_c : public ISphNoncopyable
{
SmallStringHash_T<cServedIndexRefPtr_c> m_hIndexes;
public:
// add from globally served
bool AddUniqIndex ( const CSphString& sName );
// add custom
void AddIndex ( const CSphString& sName, cServedIndexRefPtr_c pIdx );
// use idx
cServedIndexRefPtr_c Get ( const CSphString &sName ) const;
};
struct LocalSearchRef_t;
class GlobalSorters_c;
class SearchHandler_c
{
public:
SearchHandler_c ( int iQueries, std::unique_ptr<QueryParser_i> pParser, QueryType_e eQueryType, bool bMaster );
~SearchHandler_c();
void RunQueries (); ///< run all queries, get all results
void RunCollect ( const CSphQuery & tQuery, const CSphString & sIndex, CSphString * pErrors, CSphVector<BYTE> * pCollectedDocs );
void SetQuery ( int iQuery, const CSphQuery & tQuery, std::unique_ptr<ISphTableFunc> pTableFunc );
void SetQueryParser ( std::unique_ptr<QueryParser_i> pParser, QueryType_e eQueryType );
void SetProfile ( QueryProfile_c * pProfile );
AggrResult_t * GetResult ( int iResult ) { return m_dAggrResults.Begin() + iResult; }
void SetFederatedUser () { m_bFederatedUser = true; }
public:
CSphVector<CSphQuery> m_dQueries; ///< queries we need to run
CSphVector<AggrResult_t> m_dAggrResults; ///< results we obtained
CSphVector<StatsPerQuery_t> m_dQueryIndexStats; ///< statistics for current query
CSphVector<SearchFailuresLog_c> m_dFailuresSet; ///< failure logs for each query
CSphVector<CSphVector<int64_t>> m_dAgentTimes; ///< per-agent time stats
KeepCollection_c m_dAcquired; /// locked indexes
CSphFixedVector<std::unique_ptr<ISphTableFunc>> m_dTables;
SqlStmt_t * m_pStmt = nullptr; ///< original (one) statement to take extra options
protected:
void RunSubset ( int iStart, int iEnd ); ///< run queries against index(es) from first query in the subset
void RunLocalSearches();
bool AllowsMulti() const;
void SetupLocalDF();
bool m_bMultiQueue = false; ///< whether current subset is subject to multi-queue optimization
bool m_bFacetQueue = false; ///< whether current subset is subject to facet-queue optimization
CSphVector<LocalIndex_t> m_dLocal; ///< local indexes for the current subset
StrVec_t m_dExtraSchema; ///< the extra attrs for agents. One vec per index*threads
CSphVector<BYTE> * m_pCollectedDocs = nullptr; ///< this query is for deleting
QueryProfile_c * m_pProfile = nullptr;
QueryType_e m_eQueryType {QUERY_API}; ///< queries from sphinxql require special handling
std::unique_ptr<QueryParser_i> m_pQueryParser; ///< parser used for queries in this handler. e.g. plain or json-style
bool m_bNeedDocIDs = false; ///< do we need docids returned from local searches (remotes return them anyway)?
// FIXME!!! breaks for dist threads with SNIPPETS expressions for queries to multiple indexes
mutable ExprHook_c m_tHook;
SmallStringHash_T < int64_t > m_hLocalDocs;
int64_t m_iTotalDocs = 0;
bool m_bGotLocalDF = false;
bool m_bMaster;
bool m_bFederatedUser;
bool m_bQueryLog = true;
void OnRunFinished ();
private:
CSphVector<CSphQueryResult> m_dResults;
VecTraits_T<CSphQuery> m_dNQueries; ///< working subset of queries
VecTraits_T<AggrResult_t> m_dNAggrResults; ///< working subset of results
VecTraits_T<CSphQueryResult> m_dNResults; ///< working subset of result pointers
VecTraits_T<SearchFailuresLog_c> m_dNFailuresSet; ///< working subset of failures
struct IndexPSInfo_t
{
int m_iThreads = 0; // threads per index
int m_iMaxThreads = 0; // max threads per index (used for consistency between GetPseudoShardingMetric() and SpawnIterators())
bool m_bForceSingleThread = false; // for disk chunks; means "run all disk chunk searches in a single thread"
};
CSphVector<IndexPSInfo_t> m_dPSInfo;
StringBuilder_c m_sError;
private:
struct JoinedServedIndex_t
{
cServedIndexRefPtr_c m_pServed;
int m_iDupeId = -1;
};
bool ParseSysVar();
bool ParseIdxSubkeys();
bool CheckMultiQuery() const;
bool AcquireInvokedIndexes();
void UniqLocals ( VecTraits_T<LocalIndex_t>& dLocals );
void RunActionQuery ( const CSphQuery & tQuery, const CSphString & sIndex, CSphString * pErrors ); ///< run delete/update
bool BuildIndexList ( int & iDivideLimits, VecRefPtrsAgentConn_t & dRemotes, CSphVector<DistrServedByAgent_t> & dDistrServedByAgent ); // fixme!
void CalcTimeStats ( int64_t tmCpu, int64_t tmSubset, const CSphVector<DistrServedByAgent_t> & dDistrServedByAgent );
void CalcPerIndexStats ( const CSphVector<DistrServedByAgent_t> & dDistrServedByAgent ) const;
void CalcGlobalStats ( int64_t tmCpu, int64_t tmSubset, int64_t tmLocal, const CSphIOStats & tIO, const VecRefPtrsAgentConn_t & dRemotes ) const;
int CreateSorters ( const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, VecTraits_T<ISphMatchSorter*> & dSorters, VecTraits_T<CSphString> & dErrors, StrVec_t * pExtra, SphQueueRes_t & tQueueRes, ISphExprHook * pHook ) const;
int CreateSingleSorters ( const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, VecTraits_T<ISphMatchSorter*> & dSorters, VecTraits_T<CSphString> & dErrors, StrVec_t * pExtra, SphQueueRes_t & tQueueRes, ISphExprHook * pHook ) const;
int CreateMultiQueryOrFacetSorters ( const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, VecTraits_T<ISphMatchSorter*> & dSorters, VecTraits_T<CSphString> & dErrors, StrVec_t * pExtra, SphQueueRes_t & tQueueRes, ISphExprHook * pHook ) const;
SphQueueSettings_t MakeQueueSettings ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, int iMaxMatches, bool bForceSingleThread, ISphExprHook * pHook ) const;
cServedIndexRefPtr_c CheckIndexSelectable ( const CSphString& sLocal, const char * szParent, VecTraits_T<SearchFailuresLog_c> * pNFailuresSet=nullptr ) const;
bool PopulateJoinedIndexes ( CSphVector<JoinedServedIndex_t> & dJoinedServed, VecTraits_T<SearchFailuresLog_c> & dFailuresSet ) const;
CSphVector<const CSphIndex*> GetRlockedJoinedIndexes ( const CSphVector<JoinedServedIndex_t> & dJoinedServed, std::vector<RIdx_c> & dRLockedJoined ) const;
bool CreateValidSorters ( VecTraits_T<ISphMatchSorter *> & dSrt, SphQueueRes_t * pQueueRes, VecTraits_T<SearchFailuresLog_c> & dFlr, StrVec_t * pExtra, const CSphIndex* pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, const CSphString & sLocal, const char * szParent, ISphExprHook * pHook );
void PopulateCountDistinct ( CSphVector<CSphVector<int64_t>> & dCountDistinct ) const;
int CalcMaxThreadsPerIndex ( int iConcurrency ) const;
void CalcThreadsPerIndex ( int iConcurrency );
bool SubmitSuccess ( CSphVector<ISphMatchSorter *> & dSorters, GlobalSorters_c & tGlobalSorters, LocalSearchRef_t & tCtx, int64_t & iCpuTime, int iQuery, int iLocal, const CSphQueryResultMeta & tMqMeta, const CSphQueryResult & tMqRes );
};
PubSearchHandler_c::PubSearchHandler_c ( int iQueries, std::unique_ptr<QueryParser_i> pQueryParser, QueryType_e eQueryType, bool bMaster )
: m_pImpl { std::make_unique<SearchHandler_c> ( iQueries, std::move ( pQueryParser ), eQueryType, bMaster ) }
{
assert ( m_pImpl );
}
PubSearchHandler_c::~PubSearchHandler_c () = default;
void PubSearchHandler_c::RunQueries ()
{
m_pImpl->RunQueries();
}
void PubSearchHandler_c::SetQuery ( int iQuery, const CSphQuery & tQuery, std::unique_ptr<ISphTableFunc> pTableFunc )
{
m_pImpl->SetQuery ( iQuery, tQuery, std::move(pTableFunc) );
}
void PubSearchHandler_c::SetProfile ( QueryProfile_c * pProfile )
{
m_pImpl->SetProfile ( pProfile );
}
void PubSearchHandler_c::SetStmt ( SqlStmt_t & tStmt )
{
m_pImpl->m_pStmt = &tStmt;
}
AggrResult_t * PubSearchHandler_c::GetResult ( int iResult )
{
return m_pImpl->GetResult (iResult);
}
void PubSearchHandler_c::PushIndex ( const CSphString& sIndex, const cServedIndexRefPtr_c& pDesc )
{
m_pImpl->m_dAcquired.AddIndex ( sIndex, pDesc );
}
void PubSearchHandler_c::RunCollect ( const CSphQuery& tQuery, const CSphString& sIndex, CSphString* pErrors, CSphVector<BYTE>* pCollectedDocs )
{
m_pImpl->RunCollect ( tQuery, sIndex, pErrors, pCollectedDocs );
}
SearchHandler_c::SearchHandler_c ( int iQueries, std::unique_ptr<QueryParser_i> pQueryParser, QueryType_e eQueryType, bool bMaster )
: m_dTables ( iQueries )
{
m_dQueries.Resize ( iQueries );
m_dAggrResults.Resize ( iQueries );
m_dFailuresSet.Resize ( iQueries );
m_dAgentTimes.Resize ( iQueries );
m_bMaster = bMaster;
m_bFederatedUser = false;
SetQueryParser ( std::move ( pQueryParser ), eQueryType );
m_dResults.Resize ( iQueries );
for ( int i=0; i<iQueries; ++i )
m_dResults[i].m_pMeta = &m_dAggrResults[i];
// initial slices (when nothing explicitly asked)
m_dNQueries = m_dQueries;
m_dNAggrResults = m_dAggrResults;
m_dNResults = m_dResults;
m_dNFailuresSet = m_dFailuresSet;
}
//////////////////
/* Smart gc retire of a vec of queries.
* We have a CSphVector<CSphQuery> which is done, but some threads may still use individual queries from it, so we can't just
* delete it, since they would lose the objects and that would cause a crash.
*
* So, if some queries are still in use, we retire them with a custom deleter, which decreases the counter,
* and finally delete the whole vec.
*/
class RetireQueriesVec_c
{
CSphVector<CSphQuery> m_dQueries; // given queries I'll finally remove
std::atomic<int> m_iInUse; // how many of them are still referred to
void OneQueryDeleted()
{
if ( m_iInUse.fetch_sub ( 1, std::memory_order_release )==1 )
{
assert( m_iInUse.load ( std::memory_order_acquire )==0 );
delete this;
}
}
static void Delete ( void * pArg )
{
if ( pArg )
{
auto pMe = (RetireQueriesVec_c *) ( (CSphQuery *) pArg )->m_pCookie;
assert ( pMe && "Each retiring query from vec must have address of RetireQueriesVec_c in cookie");
if ( pMe )
pMe->OneQueryDeleted ();
}
}
public:
void EngageRetiring ( CSphVector<CSphQuery> dQueries, CSphVector<int> dRetired )
{
assert ( !dRetired.IsEmpty () );
m_iInUse.store ( dRetired.GetLength (), std::memory_order_release );
m_dQueries = std::move ( dQueries );
for ( auto iRetired: dRetired )
{
m_dQueries[iRetired].m_pCookie = this;
hazard::Retire ( (void*) &m_dQueries[iRetired], Delete );
}
}
};
SearchHandler_c::~SearchHandler_c ()
{
auto dPointed = hazard::GetListOfPointed ( m_dQueries );
if ( !dPointed.IsEmpty () )
{
// pQueryHolder will be self-removed when all used queries retired
auto pQueryHolder = new RetireQueriesVec_c;
pQueryHolder->EngageRetiring ( std::move ( m_dQueries ), std::move ( dPointed ) );
}
}
void SearchHandler_c::SetQueryParser ( std::unique_ptr<QueryParser_i> pParser, QueryType_e eQueryType )
{
m_pQueryParser = std::move ( pParser );
m_eQueryType = eQueryType;
for ( auto & dQuery : m_dQueries )
{
dQuery.m_pQueryParser = m_pQueryParser.get();
dQuery.m_eQueryType = eQueryType;
}
}
bool KeepCollection_c::AddUniqIndex ( const CSphString & sName )
{
if ( m_hIndexes.Exists ( sName ) )
return true;
auto pIdx = GetServed ( sName );
if ( !pIdx )
return false;
m_hIndexes.Add ( std::move ( pIdx ), sName );
return true;
}
void KeepCollection_c::AddIndex ( const CSphString & sName, cServedIndexRefPtr_c pIdx )
{
if ( m_hIndexes.Exists ( sName ) )
return;
m_hIndexes.Add ( std::move ( pIdx ), sName );
}
cServedIndexRefPtr_c KeepCollection_c::Get ( const CSphString & sName ) const
{
auto * ppIndex = m_hIndexes ( sName );
assert ( ppIndex && "KeepCollection_c::Get called with absent key");
return *ppIndex;
}
void SearchHandler_c::RunCollect ( const CSphQuery &tQuery, const CSphString &sIndex, CSphString * pErrors, CSphVector<BYTE> * pCollectedDocs )
{
m_bQueryLog = false;
m_pCollectedDocs = pCollectedDocs;
RunActionQuery ( tQuery, sIndex, pErrors );
}
void SearchHandler_c::RunActionQuery ( const CSphQuery & tQuery, const CSphString & sIndex, CSphString * pErrors )
{
SetQuery ( 0, tQuery, nullptr );
m_dQueries[0].m_sIndexes = sIndex;
m_dLocal.Add ().m_sName = sIndex;
CheckQuery ( tQuery, *pErrors );
if ( !pErrors->IsEmpty() )
return;
int64_t tmLocal = -sphMicroTimer();
int64_t tmCPU = -sphTaskCpuTimer ();
RunLocalSearches();
tmLocal += sphMicroTimer();
tmCPU += sphTaskCpuTimer();
OnRunFinished();
auto & tRes = m_dAggrResults[0];
tRes.m_iOffset = tQuery.m_iOffset;
tRes.m_iCount = Max ( Min ( tQuery.m_iLimit, tRes.GetLength()-tQuery.m_iOffset ), 0 );
// actually tRes.m_iCount=0 since delete/update produces no matches
tRes.m_iQueryTime += (int)(tmLocal/1000);
tRes.m_iCpuTime += tmCPU;
if ( !tRes.m_iSuccesses )
{
StringBuilder_c sFailures;
m_dFailuresSet[0].BuildReport ( sFailures );
sFailures.MoveTo ( *pErrors );
} else if ( !tRes.m_sError.IsEmpty() )
{
StringBuilder_c sFailures;
m_dFailuresSet[0].BuildReport ( sFailures );
sFailures.MoveTo ( tRes.m_sWarning ); // FIXME!!! commit warnings too
}
const CSphIOStats & tIO = tRes.m_tIOStats;
auto & g_tStats = gStats ();
g_tStats.m_iQueries.fetch_add ( 1, std::memory_order_relaxed );
g_tStats.m_iQueryTime.fetch_add ( tmLocal, std::memory_order_relaxed );
g_tStats.m_iQueryCpuTime.fetch_add ( tmCPU, std::memory_order_relaxed );
g_tStats.m_iDiskReads.fetch_add ( tIO.m_iReadOps, std::memory_order_relaxed );
g_tStats.m_iDiskReadTime.fetch_add ( tIO.m_iReadTime, std::memory_order_relaxed );
g_tStats.m_iDiskReadBytes.fetch_add ( tIO.m_iReadBytes, std::memory_order_relaxed );
if ( m_bQueryLog )
LogQuery ( m_dQueries[0], m_dAggrResults[0], m_dAgentTimes[0] );
}
void SearchHandler_c::SetQuery ( int iQuery, const CSphQuery & tQuery, std::unique_ptr<ISphTableFunc> pTableFunc )
{
m_dQueries[iQuery] = tQuery;
m_dQueries[iQuery].m_pQueryParser = m_pQueryParser.get();
m_dQueries[iQuery].m_eQueryType = m_eQueryType;
m_dTables[iQuery] = std::move ( pTableFunc );
}
void SearchHandler_c::SetProfile ( QueryProfile_c * pProfile )
{
assert ( pProfile );
m_pProfile = pProfile;
}
void SearchHandler_c::RunQueries()
{
// batch queries to same index(es)
// or work each query separately if indexes are different
int iStart = 0;
ARRAY_FOREACH ( i, m_dQueries )
{
if ( m_dQueries[i].m_sIndexes!=m_dQueries[iStart].m_sIndexes )
{
RunSubset ( iStart, i );
iStart = i;
}
}
RunSubset ( iStart, m_dQueries.GetLength() );
if ( m_bQueryLog )
{
ARRAY_FOREACH ( i, m_dQueries )
LogQuery ( m_dQueries[i], m_dAggrResults[i], m_dAgentTimes[i] );
}
// no need to call OnRunFinished() as meta.matches is already calculated during search
}
// final fixup
void SearchHandler_c::OnRunFinished()
{
for ( auto & tResult : m_dAggrResults )
tResult.m_iMatches = tResult.GetLength();
}
// return the sequence of columns as 'show create table' or 'describe' would reveal them
static StrVec_t GetDefaultSchema ( const CSphIndex* pIndex )
{
StrVec_t dRes;
auto& tSchema = pIndex->GetMatchSchema();
if ( tSchema.GetAttrsCount()==0 )
return dRes;
assert ( tSchema.GetAttr ( 0 ).m_sName == sphGetDocidName() );
const auto& tId = tSchema.GetAttr ( 0 );
dRes.Add ( tId.m_sName );
for ( int i = 0; i < tSchema.GetFieldsCount(); ++i )
{
const auto& tField = tSchema.GetField ( i );
dRes.Add ( tField.m_sName );
}
for ( int i = 1; i < tSchema.GetAttrsCount(); ++i ) // from 1, as 0 is docID and was already emitted
{
const auto& tAttr = tSchema.GetAttr ( i );
if ( sphIsInternalAttr ( tAttr ) )
continue;
if ( tSchema.GetField ( tAttr.m_sName.cstr() ) )
continue; // already described it as a field property
dRes.Add ( tAttr.m_sName );
}
return dRes;
}
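// assemble the common sorter (queue) settings for one table; the lambdas let the sorter
// pull count/count-distinct/count-filter stats straight from the table, and a join clause,
// if any, adds JoinArgs_t carrying both schemas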
SphQueueSettings_t SearchHandler_c::MakeQueueSettings ( const CSphIndex * pIndex, const CSphIndex * pJoinedIndex, int iMaxMatches, bool bForceSingleThread, ISphExprHook * pHook ) const
{
auto& tSess = session::Info();
SphQueueSettings_t tQS ( pIndex->GetMatchSchema (), m_pProfile, tSess.m_pSqlRowBuffer, &tSess.m_pSessionOpaque1, &tSess.m_pSessionOpaque2 );
tQS.m_bComputeItems = true;
tQS.m_pCollection = m_pCollectedDocs;
tQS.m_pHook = pHook;
tQS.m_iMaxMatches = GetMaxMatches ( iMaxMatches, pIndex );
tQS.m_bNeedDocids = m_bNeedDocIDs; // need docids to merge results from indexes
tQS.m_fnGetCountDistinct = [pIndex]( const CSphString & sAttr, CSphString & sModifiedAttr ){ return pIndex->GetCountDistinct ( sAttr, sModifiedAttr ); };
tQS.m_fnGetCountFilter = [pIndex]( const CSphFilterSettings & tFilter, CSphString & sModifiedAttr ){ return pIndex->GetCountFilter ( tFilter, sModifiedAttr ); };
tQS.m_fnGetCount = [pIndex](){ return pIndex->GetCount(); };
tQS.m_bEnableFastDistinct = m_dLocal.GetLength()<=1;
tQS.m_bForceSingleThread = bForceSingleThread;
tQS.m_dCreateSchema = GetDefaultSchema ( pIndex );
if ( pJoinedIndex )
tQS.m_pJoinArgs = std::make_unique<JoinArgs_t> ( pJoinedIndex->GetMatchSchema(), pIndex->GetName(), pJoinedIndex->GetName() );
return tQS;
}
int SearchHandler_c::CreateMultiQueryOrFacetSorters ( const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, VecTraits_T<ISphMatchSorter *> & dSorters, VecTraits_T<CSphString> & dErrors, StrVec_t * pExtra, SphQueueRes_t & tQueueRes, ISphExprHook * pHook ) const
{
int iValidSorters = 0;
auto tQueueSettings = MakeQueueSettings ( pIndex, dJoinedIndexes[0], m_dNQueries.First ().m_iMaxMatches, m_dPSInfo.First().m_bForceSingleThread, pHook );
sphCreateMultiQueue ( tQueueSettings, m_dNQueries, dSorters, dErrors, tQueueRes, pExtra, m_pProfile );
m_dNQueries.First().m_bZSlist = tQueueRes.m_bZonespanlist;
dSorters.Apply ( [&iValidSorters] ( const ISphMatchSorter * pSorter ) {
if ( pSorter )
++iValidSorters;
} );
if ( m_bFacetQueue && iValidSorters<dSorters.GetLength () )
{
dSorters.Apply ( [] ( ISphMatchSorter *& pSorter ) { SafeDelete (pSorter); } );
return 0;
}
if ( m_bFacetQueue && !CreateJoinMultiSorter ( pIndex, dJoinedIndexes[0], tQueueSettings, m_dNQueries, dSorters, dErrors[0] ) )
{
dSorters.Apply ( [] ( ISphMatchSorter *& pSorter ) { SafeDelete (pSorter); } );
return 0;
}
return iValidSorters;
}
int SearchHandler_c::CreateSingleSorters ( const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, VecTraits_T<ISphMatchSorter *> & dSorters, VecTraits_T<CSphString> & dErrors, StrVec_t * pExtra, SphQueueRes_t & tQueueRes, ISphExprHook * pHook ) const
{
int iValidSorters = 0;
tQueueRes.m_bAlowMulti = false;
const int iQueries = m_dNQueries.GetLength();
for ( int iQuery = 0; iQuery<iQueries; ++iQuery )
{
CSphQuery & tQuery = m_dNQueries[iQuery];
// create queue
auto tQueueSettings = MakeQueueSettings ( pIndex, dJoinedIndexes[iQuery], tQuery.m_iMaxMatches, m_dPSInfo.First().m_bForceSingleThread, pHook );
ISphMatchSorter * pSorter = sphCreateQueue ( tQueueSettings, tQuery, dErrors[iQuery], tQueueRes, pExtra, m_pProfile );
if ( !pSorter )
continue;
// possibly create a wrapper (if we have JOIN)
pSorter = CreateJoinSorter ( pIndex, dJoinedIndexes[iQuery], tQueueSettings, tQuery, pSorter, tQueueRes.m_bJoinedGroupSort, dErrors[iQuery] );
if ( !pSorter )
continue;
tQuery.m_bZSlist = tQueueRes.m_bZonespanlist;
dSorters[iQuery] = pSorter;
++iValidSorters;
}
return iValidSorters;
}
int SearchHandler_c::CreateSorters ( const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, VecTraits_T<ISphMatchSorter *> & dSorters, VecTraits_T<CSphString> & dErrors, StrVec_t* pExtra, SphQueueRes_t & tQueueRes, ISphExprHook * pHook ) const
{
if ( m_bMultiQueue || m_bFacetQueue )
return CreateMultiQueryOrFacetSorters ( pIndex, dJoinedIndexes, dSorters, dErrors, pExtra, tQueueRes, pHook );
return CreateSingleSorters ( pIndex, dJoinedIndexes, dSorters, dErrors, pExtra, tQueueRes, pHook );
}
struct LocalSearchRef_t
{
ExprHook_c& m_tHook;
StrVec_t* m_pExtra;
VecTraits_T<SearchFailuresLog_c>& m_dFailuresSet;
VecTraits_T<AggrResult_t>& m_dAggrResults;
VecTraits_T<CSphQueryResult>& m_dResults;
LocalSearchRef_t ( ExprHook_c & tHook, StrVec_t* pExtra, VecTraits_T<SearchFailuresLog_c> & dFailures, VecTraits_T<AggrResult_t> & dAggrResults, VecTraits_T<CSphQueryResult> & dResults )
: m_tHook ( tHook )
, m_pExtra ( pExtra )
, m_dFailuresSet ( dFailures )
, m_dAggrResults ( dAggrResults )
, m_dResults ( dResults )
{}
void MergeChild ( LocalSearchRef_t dChild ) const
{
if ( m_pExtra )
{
assert ( dChild.m_pExtra );
m_pExtra->Append ( *dChild.m_pExtra );
}
auto & dChildAggrResults = dChild.m_dAggrResults;
for ( int i = 0, iQueries = m_dAggrResults.GetLength (); i<iQueries; ++i )
{
auto & tResult = m_dAggrResults[i];
auto & tChild = dChildAggrResults[i];
tResult.m_dResults.Append ( tChild.m_dResults );
// word statistics
tResult.MergeWordStats ( tChild );
// other data (warnings, errors, etc.)
// errors
if ( !tChild.m_sError.IsEmpty ())
tResult.m_sError = tChild.m_sError;
// warnings
if ( !tChild.m_sWarning.IsEmpty ())
tResult.m_sWarning = tChild.m_sWarning;
// prediction counters
tResult.m_bHasPrediction |= tChild.m_bHasPrediction;
if ( tChild.m_bHasPrediction )
{
tResult.m_tStats.Add ( tChild.m_tStats );
tResult.m_iPredictedTime = CalcPredictedTimeMsec ( tResult );
}
// profiling
if ( tChild.m_pProfile )
tResult.m_pProfile->AddMetric ( *tChild.m_pProfile );
tResult.m_iCpuTime += tChild.m_iCpuTime;
tResult.m_iTotalMatches += tChild.m_iTotalMatches;
tResult.m_bTotalMatchesApprox |= tChild.m_bTotalMatchesApprox;
tResult.m_iSuccesses += tChild.m_iSuccesses;
tResult.m_tIOStats.Add ( tChild.m_tIOStats );
tResult.m_tIteratorStats.Merge ( tChild.m_tIteratorStats );
// failures
m_dFailuresSet[i].Append ( dChild.m_dFailuresSet[i] );
}
}
inline static bool IsClonable()
{
return true;
}
};
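// per-worker clone of the search context: owns its own hook, failure logs, results,
// and (in the single-query profiling case) its own profile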
struct LocalSearchClone_t
{
ExprHook_c m_tHook;
StrVec_t m_dExtra;
StrVec_t* m_pExtra;
CSphVector<SearchFailuresLog_c> m_dFailuresSet;
CSphVector<AggrResult_t> m_dAggrResults;
CSphVector<CSphQueryResult> m_dResults;
explicit LocalSearchClone_t ( const LocalSearchRef_t & dParent)
{
int iQueries = dParent.m_dFailuresSet.GetLength ();
m_dFailuresSet.Resize ( iQueries );
m_dAggrResults.Resize ( iQueries );
m_dResults.Resize ( iQueries );
for ( int i=0; i<iQueries; ++i )
m_dResults[i].m_pMeta = &m_dAggrResults[i];
m_pExtra = dParent.m_pExtra ? &m_dExtra : nullptr;
// set a profiler complementary to the one in RunSubset (look for the `if ( iQueries==1 && m_pProfile )` clause there)
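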
if ( iQueries==1 && dParent.m_dAggrResults.First ().m_pProfile )
{
auto pProfile = new QueryProfile_c;
m_dAggrResults.First().m_pProfile = pProfile;
m_tHook.SetProfiler ( pProfile );
}
}
explicit operator LocalSearchRef_t ()
{
return { m_tHook, m_pExtra, m_dFailuresSet, m_dAggrResults, m_dResults };
}
~LocalSearchClone_t()
{
if ( !m_dAggrResults.IsEmpty () )
SafeDelete ( m_dAggrResults.First().m_pProfile );
}
};
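// fetch an already acquired index by name and check that it can serve selects;
// on failure, report into every per-query failure log (if given) and return an empty ref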
cServedIndexRefPtr_c SearchHandler_c::CheckIndexSelectable ( const CSphString & sLocal, const char * szParent, VecTraits_T<SearchFailuresLog_c> * pNFailuresSet ) const
{
const auto& pServed = m_dAcquired.Get ( sLocal );
assert ( pServed );
if ( !ServedDesc_t::IsSelectable ( pServed ) )
{
if ( pNFailuresSet )
for ( auto & dFailureSet : *pNFailuresSet )
dFailureSet.SubmitEx ( sLocal, nullptr, "%s", "table is not suitable for select" );
return cServedIndexRefPtr_c{};
}
return pServed;
}
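// resolve the right-side JOIN index of every query and mark duplicates,
// so each distinct served index gets r-locked only once later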
bool SearchHandler_c::PopulateJoinedIndexes ( CSphVector<JoinedServedIndex_t> & dJoinedServed, VecTraits_T<SearchFailuresLog_c> & dFailuresSet ) const
{
dJoinedServed.Resize ( m_dNQueries.GetLength() );
ARRAY_FOREACH ( i, m_dNQueries )
{
const auto & tQuery = m_dNQueries[i];
if ( tQuery.m_sJoinIdx.IsEmpty() )
continue;
const auto & pServed = m_dAcquired.Get ( tQuery.m_sJoinIdx );
if ( !pServed )
{
for ( auto & dFailureSet : dFailuresSet )
dFailureSet.SubmitEx ( tQuery.m_sJoinIdx, nullptr, "%s", "table not found" );
return false;
}
if ( !ServedDesc_t::IsSelectable ( pServed ) )
{
for ( auto & dFailureSet : dFailuresSet )
dFailureSet.SubmitEx ( tQuery.m_sJoinIdx, nullptr, "%s", "table is not suitable for select" );
return false;
}
dJoinedServed[i] = { pServed, -1 };
}
ARRAY_FOREACH ( i, dJoinedServed )
{
for ( int j = i+1; j < dJoinedServed.GetLength(); j++ )
if ( dJoinedServed[j].m_pServed == dJoinedServed[i].m_pServed )
{
dJoinedServed[j].m_iDupeId = i;
break;
}
}
return true;
}
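// create the sorters for one local index and submit any creation errors into the failure logs;
// returns false if no sorter was created at all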
bool SearchHandler_c::CreateValidSorters ( VecTraits_T<ISphMatchSorter *> & dSrt, SphQueueRes_t * pQueueRes, VecTraits_T<SearchFailuresLog_c> & dFlr, StrVec_t * pExtra, const CSphIndex * pIndex, CSphVector<const CSphIndex*> & dJoinedIndexes, const CSphString & sLocal, const char * szParent, ISphExprHook * pHook )
{
auto iQueries = dSrt.GetLength();
#if PARANOID
for ( const auto* pSorter : dSrt)
assert ( !pSorter );
#endif
CSphFixedVector<CSphString> dErrors ( iQueries );
int iValidSorters = CreateSorters ( pIndex, dJoinedIndexes, dSrt, dErrors, pExtra, *pQueueRes, pHook );
if ( iValidSorters<dSrt.GetLength() )
{
ARRAY_FOREACH ( i, dErrors )
{
if ( !dErrors[i].IsEmpty () )
dFlr[i].Submit ( sLocal, szParent, dErrors[i].cstr () );
}
}
m_bMultiQueue = pQueueRes->m_bAlowMulti;
return !!iValidSorters;
}
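// precalculate count-distinct values of the group-by attribute for every local index and query; -1 means 'not available'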
void SearchHandler_c::PopulateCountDistinct ( CSphVector<CSphVector<int64_t>> & dCountDistinct ) const
{
dCountDistinct.Resize ( m_dLocal.GetLength() );
ARRAY_FOREACH ( iLocal, m_dLocal )
{
const LocalIndex_t & tLocal = m_dLocal[iLocal];
auto pIndex = CheckIndexSelectable ( tLocal.m_sName, tLocal.m_sParentIndex.cstr(), nullptr );
if ( !pIndex )
continue;
auto & dIndexCountDistinct = dCountDistinct[iLocal];
dIndexCountDistinct.Resize ( m_dNQueries.GetLength() );
dIndexCountDistinct.Fill(-1);
ARRAY_FOREACH ( i, dIndexCountDistinct )
{
auto & tQuery = m_dNQueries[i];
int iGroupby = GetAliasedAttrIndex ( tQuery.m_sGroupBy, tQuery, RIdx_c(pIndex)->GetMatchSchema() );
if ( iGroupby<0 )
continue;
auto & sAttr = RIdx_c(pIndex)->GetMatchSchema().GetAttr(iGroupby).m_sName;
CSphString sModifiedAttr;
dIndexCountDistinct[i] = RIdx_c(pIndex)->GetCountDistinct ( sAttr, sModifiedAttr );
}
}
}
int SearchHandler_c::CalcMaxThreadsPerIndex ( int iConcurrency ) const
{
int iNumValid = 0;
ARRAY_FOREACH ( i, m_dLocal )
{
auto pIndex = CheckIndexSelectable ( m_dLocal[i].m_sName, m_dLocal[i].m_sParentIndex.cstr(), nullptr );
if ( !pIndex )
continue;
iNumValid++;
}
return ::CalcMaxThreadsPerIndex ( iConcurrency, iNumValid );
}
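// decide how many threads each local index may use, based on busy workers,
// pseudo-sharding metrics, and the count-distinct estimates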
void SearchHandler_c::CalcThreadsPerIndex ( int iConcurrency )
{
if ( !iConcurrency )
iConcurrency = g_iThreads;
int iBusyWorkers = Max ( GlobalWorkPool()->CurTasks() - 1, 0 ); // ignore current task
int iAvailableWorkers = Max ( iConcurrency-iBusyWorkers, 1 );
CSphVector<CSphVector<int64_t>> dCountDistinct;
PopulateCountDistinct ( dCountDistinct );
int iMaxThreadsPerIndex = CalcMaxThreadsPerIndex ( iAvailableWorkers );
CSphVector<SplitData_t> dSplitData ( m_dLocal.GetLength() );
int iEnabledIndexes = 0;
ARRAY_FOREACH ( iLocal, m_dLocal )
{
const LocalIndex_t & tLocal = m_dLocal[iLocal];
auto pIndex = CheckIndexSelectable ( tLocal.m_sName, tLocal.m_sParentIndex.cstr(), nullptr );
if ( !pIndex )
continue;
iEnabledIndexes++;
auto & tPSInfo = m_dPSInfo[iLocal];
auto & tSplitData = dSplitData[iLocal];
RIdx_c pIdx { pIndex };
if ( GetPseudoSharding () || pIdx->IsRT() )
{
// do metric calcs
tPSInfo.m_iMaxThreads = iMaxThreadsPerIndex;
auto tMetric = pIdx->GetPseudoShardingMetric ( m_dNQueries, dCountDistinct[iLocal], tPSInfo.m_iMaxThreads, tPSInfo.m_bForceSingleThread );
assert ( tMetric.first>=0 );
tSplitData.m_iMetric = tMetric.first;
bool bExplicitConcurrency = m_dNQueries.any_of ( []( auto & tQuery ){ return tQuery.m_iConcurrency>0; } );
tSplitData.m_iThreadCap = bExplicitConcurrency ? 0 : tMetric.second; // ignore thread cap if concurrency is explicitly specified
}
else
{
// don't do metric calcs; we are guaranteed to have one thread
// set the 'force single thread' flag to make sure max_matches won't be increased when it is not necessary
tPSInfo = { 1, 1, true };
tSplitData.m_iThreadCap = 1;
}
}
if ( iAvailableWorkers>iEnabledIndexes )
{
IntVec_t dThreads;
DistributeThreadsOverIndexes ( dThreads, dSplitData, iAvailableWorkers );
ARRAY_FOREACH ( i, dThreads )
m_dPSInfo[i].m_iThreads = dThreads[i];
}
}
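// match processor that stamps every match with a given tag (tags are used later for the tag->docstore lookup)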
class AssignTag_c : public MatchProcessor_i
{
public:
AssignTag_c ( int iTag )
: m_iTag ( iTag )
{}
void Process ( CSphMatch * pMatch ) final { ProcessMatch(pMatch); }
bool ProcessInRowIdOrder() const final { return false; }
void Process ( VecTraits_T<CSphMatch *> & dMatches ) final { dMatches.for_each ( [this]( CSphMatch * pMatch ){ ProcessMatch(pMatch); } ); }
private:
int m_iTag = 0;
inline void ProcessMatch ( CSphMatch * pMatch ) { pMatch->m_iTag = m_iTag; }
};
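// holds per-index sorters for a final global merge; engaged only for count(distinct) queries
// over several indexes with identical schemas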
class GlobalSorters_c
{
public:
GlobalSorters_c ( const VecTraits_T<CSphQuery> & dQueries, const CSphVector<cServedIndexRefPtr_c> & dIndexes )
: m_dQueries ( dQueries )
, m_dSorters { dQueries.GetLength() }
{
auto iValidIndexes = (int)dIndexes.count_of ( [&] ( const auto& pIndex ) { return pIndex; } );
m_bNeedGlobalSorters = iValidIndexes>1 && !dQueries.First().m_sGroupDistinct.IsEmpty();
if ( m_bNeedGlobalSorters )
{
// check if schemas are same
const CSphSchema * pFirstSchema = nullptr;
for ( auto i : dIndexes )
{
if ( !i )
continue;
if ( !pFirstSchema )
{
pFirstSchema = &RIdx_c ( i )->GetMatchSchema();
continue;
}
CSphString sCmpError;
if ( !pFirstSchema->CompareTo ( RIdx_c ( i )->GetMatchSchema(), sCmpError ) )
{
m_bNeedGlobalSorters = false;
break;
}
}
}
for ( auto & i : m_dSorters )
i.Resize ( dIndexes.GetLength() );
}
~GlobalSorters_c()
{
for ( auto & i : m_dSorters )
for ( auto & j : i )
SafeDelete ( j.m_pSorter );
}
bool StoreSorter ( int iQuery, int iIndex, ISphMatchSorter * & pSorter, const DocstoreReader_i * pDocstore, int iTag )
{
// the FACET head is a plain query without a group sorter and can not move its whole result set into a single sorter
// could be replaced with a !pSorter->IsGroupby() check
if ( !NeedGlobalSorters() || m_dQueries[iQuery].m_bFacetHead )
return false;
// take ownership of the sorter
m_dSorters[iQuery][iIndex] = { pSorter, pDocstore, iTag };
pSorter = nullptr;
return true;
}
bool NeedGlobalSorters() const
{
return m_bNeedGlobalSorters;
}
void MergeResults ( VecTraits_T<AggrResult_t> & dResults )
{
if ( !NeedGlobalSorters() )
return;
ARRAY_FOREACH ( iQuery, m_dSorters )
{
CSphVector<ISphMatchSorter *> dValidSorters;
for ( auto i : m_dSorters[iQuery] )
{
if ( !i.m_pSorter )
continue;
dValidSorters.Add ( i.m_pSorter );
// assign order tag here so we can link to docstore later
AssignTag_c tAssign ( i.m_iTag );
i.m_pSorter->Finalize ( tAssign, false, false );
}
int iNumIndexes = dValidSorters.GetLength();
if ( !iNumIndexes )
continue;
ISphMatchSorter * pLastSorter = dValidSorters[iNumIndexes-1];
// merge all results to the last sorter. this is done to try to keep some compatibility with no-global-sorters code branch
for ( int iIndex = iNumIndexes-2; iIndex>=0; iIndex-- )
dValidSorters[iIndex]->MoveTo ( pLastSorter, true );
dResults[iQuery].m_iTotalMatches = pLastSorter->GetTotalCount();
dResults[iQuery].AddResultset ( pLastSorter, m_dSorters[iQuery][0].m_pDocstore, m_dSorters[iQuery][0].m_iTag, m_dQueries[iQuery].m_iCutoff );
// we already assigned index/docstore tags to all matches; no need to do it again
if ( dResults[iQuery].m_dResults.GetLength() )
dResults[iQuery].m_dResults[0].m_bTagsAssigned = true;
// add fake empty result sets (for tag->docstore lookup)
for ( int i = 1; i < m_dSorters[iQuery].GetLength(); i++ )
dResults[iQuery].AddEmptyResultset ( m_dSorters[iQuery][i].m_pDocstore, m_dSorters[iQuery][i].m_iTag );
}
}
private:
struct SorterData_t
{
ISphMatchSorter * m_pSorter = nullptr;
const DocstoreReader_i * m_pDocstore = nullptr;
int m_iTag = 0;
};
const VecTraits_T<CSphQuery> & m_dQueries;
CSphVector<CSphVector<SorterData_t>> m_dSorters;
bool m_bNeedGlobalSorters = false;
};
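// r-lock each distinct joined index and collect raw index pointers aligned with the per-query list
// (duplicates reuse the already locked index)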
CSphVector<const CSphIndex*> SearchHandler_c::GetRlockedJoinedIndexes ( const CSphVector<JoinedServedIndex_t> & dJoinedServed, std::vector<RIdx_c> & dRLockedJoined ) const
{
CSphVector<const CSphIndex*> dJoinedIndexes;
for ( auto & i : dJoinedServed )
{
if ( !i.m_pServed )
{
dJoinedIndexes.Add(nullptr);
continue;
}
if ( i.m_iDupeId!=-1 )
dJoinedIndexes.Add ( dJoinedIndexes[i.m_iDupeId] );
else
{
dRLockedJoined.emplace_back ( i.m_pServed );
dJoinedIndexes.Add ( dRLockedJoined.back() );
}
}
return dJoinedIndexes;
}
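// account one successful search of a local index for one query: merge meta and stats into the
// aggregated result and pass the sorter either to the global sorters or into the result set itself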
bool SearchHandler_c::SubmitSuccess ( CSphVector<ISphMatchSorter *> & dSorters, GlobalSorters_c & tGlobalSorters, LocalSearchRef_t & tCtx, int64_t & iCpuTime, int iQuery, int iLocal, const CSphQueryResultMeta & tMqMeta, const CSphQueryResult & tMqRes )
{
auto & dNFailuresSet = tCtx.m_dFailuresSet;
auto & dNAggrResults = tCtx.m_dAggrResults;
auto & dNResults = tCtx.m_dResults;
int iNumQueries = m_dNQueries.GetLength();
const LocalIndex_t & tLocal = m_dLocal[iLocal];
const CSphString & sLocal = tLocal.m_sName;
const char * szParent = tLocal.m_sParentIndex.cstr();
int iOrderTag = tLocal.m_iOrderTag;
ISphMatchSorter * pSorter = dSorters[iQuery];
AggrResult_t & tNRes = dNAggrResults[iQuery];
int iQTimeForStats = tNRes.m_iQueryTime;
auto pDocstore = m_bMultiQueue ? tMqRes.m_pDocstore : dNResults[iQuery].m_pDocstore;
// multi-queue only returned one result set meta, so we need to replicate it
if ( m_bMultiQueue )
{
// these times will be overridden below, but let's be clean
iQTimeForStats = tMqMeta.m_iQueryTime / iNumQueries;
tNRes.m_iQueryTime += iQTimeForStats;
tNRes.MergeWordStats ( tMqMeta );
tNRes.m_iMultiplier = iNumQueries;
tNRes.m_iCpuTime += tMqMeta.m_iCpuTime / iNumQueries;
tNRes.m_bTotalMatchesApprox |= tMqMeta.m_bTotalMatchesApprox;
iCpuTime /= iNumQueries;
}
else if ( tNRes.m_iMultiplier==-1 ) // multiplier -1 means 'error'
{
dNFailuresSet[iQuery].Submit ( sLocal, szParent, tNRes.m_sError.cstr() );
return false;
}
++tNRes.m_iSuccesses;
tNRes.m_iCpuTime = iCpuTime;
tNRes.m_iTotalMatches += pSorter->GetTotalCount();
tNRes.m_iPredictedTime = tNRes.m_bHasPrediction ? CalcPredictedTimeMsec ( tNRes ) : 0;
m_dQueryIndexStats[iLocal].m_dStats[iQuery].m_iSuccesses = 1;
m_dQueryIndexStats[iLocal].m_dStats[iQuery].m_uQueryTime = iQTimeForStats;
m_dQueryIndexStats[iLocal].m_dStats[iQuery].m_uFoundRows = pSorter->GetTotalCount();
// extract matches from sorter
if ( !tGlobalSorters.StoreSorter ( iQuery, iLocal, dSorters[iQuery], pDocstore, iOrderTag ) )
tNRes.AddResultset ( pSorter, pDocstore, iOrderTag, m_dNQueries[iQuery].m_iCutoff );
if ( !tNRes.m_sWarning.IsEmpty() )
dNFailuresSet[iQuery].Submit ( sLocal, szParent, tNRes.m_sWarning.cstr() );
return true;
}
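// search all local indexes, dispatching them to coroutine workers; when parallel, heavier indexes are started first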
void SearchHandler_c::RunLocalSearches()
{
int64_t tmLocal = sphMicroTimer ();
// setup local searches
const int iQueries = m_dNQueries.GetLength ();
const int iNumLocals = m_dLocal.GetLength();
// sphWarning ( "%s:%d", __FUNCTION__, __LINE__ );
// sphWarning ("Locals: %d, queries %d", iNumLocals, iQueries );
m_dQueryIndexStats.Resize ( iNumLocals );
for ( auto & dQueryIndexStats : m_dQueryIndexStats )
dQueryIndexStats.m_dStats.Resize ( iQueries );
StrVec_t * pMainExtra = nullptr;
if ( m_dNQueries.First ().m_bAgent )
{
m_dExtraSchema.Reset (); // cleanup from any possible previous usages
pMainExtra = &m_dExtraSchema;
}
CSphVector<cServedIndexRefPtr_c> dLocalIndexes;
for ( const auto& i : m_dLocal )
dLocalIndexes.Add ( CheckIndexSelectable ( i.m_sName, nullptr ) );
GlobalSorters_c tGlobalSorters ( m_dNQueries, dLocalIndexes );
m_dPSInfo.Resize(iNumLocals);
m_dPSInfo.Fill ( { 1, 1, false } );
CSphFixedVector<int> dOrder { iNumLocals };
for ( int i = 0; i<iNumLocals; ++i )
dOrder[i] = i;
auto tDispatch = GetEffectiveBaseDispatcherTemplate();
Dispatcher::Unify ( tDispatch, m_dNQueries.First().m_tMainDispatcher );
// the context
ClonableCtx_T<LocalSearchRef_t, LocalSearchClone_t, Threads::ECONTEXT::UNORDERED> dCtx { m_tHook, pMainExtra, m_dNFailuresSet, m_dNAggrResults, m_dNResults };
auto pDispatcher = Dispatcher::Make ( iNumLocals, m_dNQueries.First().m_iConcurrency, tDispatch, dCtx.IsSingle() );
dCtx.LimitConcurrency ( pDispatcher->GetConcurrency() );
bool bSingle = pDispatcher->GetConcurrency()==1;
// sphWarning ( "iConcurrency: %d", iConcurrency );
if ( !bSingle )
{
// sphWarning ( "Reordering..." );
// if running in parallel - start in mass order; if single - in natural order
// order by decreasing index mass (the heaviest one comes first); that is why 'less' is implemented via '>'
dOrder.Sort ( Lesser ( [this] ( int a, int b ) {
return m_dLocal[a].m_iMass>m_dLocal[b].m_iMass;
} ) );
CalcThreadsPerIndex ( pDispatcher->GetConcurrency() );
}
// for ( int iOrder : dOrder )
// sphWarning ( "Sorted: %d, Order %d, mass %d", !!bSingle, iOrder, (int) m_dLocal[iOrder].m_iMass );
std::atomic<int32_t> iTotalSuccesses { 0 };
Coro::ExecuteN ( dCtx.Concurrency ( iNumLocals ), [&]
{
auto pSource = pDispatcher->MakeSource();
int iJob = -1; // make it consumed
if ( !pSource->FetchTask ( iJob ) )
{
LOCSEARCHINFO << "Early finish parallel RunLocalSearches because of empty queue";
return; // already nothing to do, early finish.
}
// these two moved from inside the loop to avoid construction on every turn
CSphVector<ISphMatchSorter *> dSorters ( iQueries );
dSorters.ZeroVec ();
auto tJobContext = dCtx.CloneNewContext();
auto& tCtx = tJobContext.first;
LOCSEARCHINFO << "RunLocalSearches cloned context " << tJobContext.second;
Threads::Coro::SetThrottlingPeriodMS ( session::GetThrottlingPeriodMS() );
while ( true )
{
if ( !pSource->FetchTask ( iJob ) )
return; // all is done
auto iLocal = dOrder[iJob];
LOCSEARCHINFO << "RunLocalSearches " << tJobContext.second << ", iJob: " << iJob << ", iLocal: " << iLocal << ", mass " << ( (int)m_dLocal[iLocal].m_iMass );
iJob = -1; // mark it consumed
int64_t iCpuTime = -sphTaskCpuTimer ();
// FIXME!!! handle different proto
myinfo::SetTaskInfo( R"(api-search query="%s" comment="%s" index="%s")",
m_dNQueries.First().m_sQuery.scstr (),
m_dNQueries.First().m_sComment.scstr (),
m_dLocal[iLocal].m_sName.scstr ());
const LocalIndex_t & dLocal = m_dLocal[iLocal];
const CSphString& sLocal = dLocal.m_sName;
const char * szParent = dLocal.m_sParentIndex.cstr ();
int iIndexWeight = dLocal.m_iWeight;
auto& dNFailuresSet = tCtx.m_dFailuresSet;
auto& dNAggrResults = tCtx.m_dAggrResults;
auto& dNResults = tCtx.m_dResults;
auto* pExtra = tCtx.m_pExtra;
// publish crash query index
GlobalCrashQueryGetRef().m_dIndex = FromStr ( sLocal );
// prepare and check the index
cServedIndexRefPtr_c pServed = CheckIndexSelectable ( sLocal, szParent, &dNFailuresSet );
if ( !pServed )
continue;
CSphVector<JoinedServedIndex_t> dJoinedServed;
if ( !PopulateJoinedIndexes ( dJoinedServed, dNFailuresSet ) )
continue;
bool bResult = false;
CSphQueryResultMeta tMqMeta;
CSphQueryResult tMqRes;
tMqRes.m_pMeta = &tMqMeta;
{ // scope for r-locking the index
RIdx_c pIndex { pServed };
tCtx.m_tHook.SetIndex ( pIndex );
tCtx.m_tHook.SetQueryType ( m_eQueryType );
std::vector<RIdx_c> dRLockedJoined;
CSphVector<const CSphIndex*> dJoinedIndexes = GetRlockedJoinedIndexes ( dJoinedServed, dRLockedJoined );
// create sorters
SphQueueRes_t tQueueRes;
if ( !CreateValidSorters ( dSorters, &tQueueRes, dNFailuresSet, pExtra, pIndex, dJoinedIndexes, sLocal, szParent, &tCtx.m_tHook ) )
continue;
// do the query
CSphMultiQueryArgs tMultiArgs ( iIndexWeight );
tMultiArgs.m_uPackedFactorFlags = tQueueRes.m_uPackedFactorFlags;
if ( m_bGotLocalDF )
{
tMultiArgs.m_bLocalDF = true;
tMultiArgs.m_pLocalDocs = &m_hLocalDocs;
tMultiArgs.m_iTotalDocs = m_iTotalDocs;
}
bool bCanBeCloned = dSorters.all_of ( []( auto * pSorter ){ return pSorter ? pSorter->CanBeCloned() : true; } );
// fixme: previous calculations are wrong; we are not splitting the query if we are using non-clonable sorters
tMultiArgs.m_iThreads = bCanBeCloned ? m_dPSInfo[iLocal].m_iThreads : 1;
tMultiArgs.m_iTotalThreads = m_dPSInfo[iLocal].m_iMaxThreads;
tMultiArgs.m_bFinalizeSorters = !tGlobalSorters.NeedGlobalSorters();
LOCSEARCHINFO << "RunLocalSearches index:" << pIndex->GetName();
dNAggrResults.First().m_tIOStats.Start ();
if ( m_bMultiQueue )
bResult = pIndex->MultiQuery ( tMqRes, m_dNQueries.First(), dSorters, tMultiArgs );
else
bResult = pIndex->MultiQueryEx ( iQueries, &m_dNQueries[0], &dNResults[0], &dSorters[0], tMultiArgs );
dNAggrResults.First ().m_tIOStats.Stop ();
}
iCpuTime += sphTaskCpuTimer ();
// handle results
if ( bResult )
{
// multi-query succeeded
for ( int i=0; i<iQueries; ++i )
{
// in the mt case the sorter index calculation here is a bit tricky (up to the sorter lines below),
// since some sorters could have failed at the "create sorter" stage
ISphMatchSorter * pSorter = dSorters[i];
if ( !pSorter )
continue;
if ( SubmitSuccess ( dSorters, tGlobalSorters, tCtx, iCpuTime, i, iLocal, tMqMeta, tMqRes ) )
iTotalSuccesses.fetch_add ( 1, std::memory_order_relaxed );
}
} else
// failed, submit local (if not empty) or global error string
for ( int i = 0; i<iQueries; ++i )
dNFailuresSet[i].Submit ( sLocal, szParent, tMqMeta.m_sError.IsEmpty ()
? dNAggrResults[m_bMultiQueue ? 0 : i].m_sError.cstr ()
: tMqMeta.m_sError.cstr () );
// cleanup sorters
for ( auto &pSorter : dSorters )
SafeDelete ( pSorter );
LOCSEARCHINFO << "RunLocalSearches result " << bResult << " index " << sLocal;
if ( !pSource->FetchTask ( iJob ) )
return; // all is done
Threads::Coro::ThrottleAndKeepCrashQuery (); // we set CrashQuery anyway at the start of the loop
}
});
LOCSEARCHINFO << "RunLocalSearches processed in " << dCtx.NumWorked() << " thread(s)";
dCtx.Finalize (); // merge mt results (if any)
tGlobalSorters.MergeResults(m_dNAggrResults);
// update our wall time for every result set
tmLocal = sphMicroTimer ()-tmLocal;
for ( int iQuery = 0; iQuery<iQueries; ++iQuery )
m_dNAggrResults[iQuery].m_iQueryTime += (int) ( tmLocal / 1000 );
auto iTotalSuccessesInt = iTotalSuccesses.load ( std::memory_order_relaxed );
for ( auto iLocal = 0; iLocal<iNumLocals; ++iLocal )
for ( int iQuery = 0; iQuery<iQueries; ++iQuery )
{
QueryStat_t & tStat = m_dQueryIndexStats[iLocal].m_dStats[iQuery];
if ( tStat.m_iSuccesses )
tStat.m_uQueryTime = (int) ( tmLocal / 1000 / iTotalSuccessesInt );
}
}
// check expressions in the queries to make sure they are ready for the multi-query optimization
bool SearchHandler_c::AllowsMulti() const
{
if ( m_bFacetQueue )
return true;
// in some cases the same select list allows queries to be multi-query optimized,
// but we need to check the dynamic parts for size equality, and we do that later in RunLocalSearches()
const CSphVector<CSphQueryItem> & tFirstQueryItems = m_dNQueries.First().m_dItems;
bool bItemsSameLen = true;
for ( int i=1; i<m_dNQueries.GetLength() && bItemsSameLen; ++i )
bItemsSameLen = ( tFirstQueryItems.GetLength()==m_dNQueries[i].m_dItems.GetLength() );
if ( bItemsSameLen )
{
bool bSameItems = true;
ARRAY_FOREACH_COND ( i, tFirstQueryItems, bSameItems )
{
const CSphQueryItem & tItem1 = tFirstQueryItems[i];
for ( int j=1; j<m_dNQueries.GetLength () && bSameItems; ++j )
{
const CSphQueryItem & tItem2 = m_dNQueries[j].m_dItems[i];
bSameItems = tItem1.m_sExpr==tItem2.m_sExpr && tItem1.m_eAggrFunc==tItem2.m_eAggrFunc;
}
}
if ( bSameItems )
return true;
}
// if select lists do not contain any expressions we can optimize queries too
for ( const auto & dLocal : m_dLocal )
{
RIdx_c pServedIndex ( m_dAcquired.Get ( dLocal.m_sName ) );
// FIXME!!! compare expressions as m_pExpr->GetHash
const CSphSchema & tSchema = pServedIndex->GetMatchSchema();
if ( m_dNQueries.any_of ( [&tSchema] ( const CSphQuery & tQ ) { return sphHasExpressions ( tQ, tSchema ); } ) )
return false;
}
return true;
}
struct IndexSettings_t
{
uint64_t m_uHash;
int m_iLocal;
};
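// collect per-term document frequencies over all local indexes, so that ranking
// over multiple indexes can use consistent IDF values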
void SearchHandler_c::SetupLocalDF ()
{
if ( m_dLocal.GetLength()<2 )
return;
SwitchProfile ( m_pProfile, SPH_QSTATE_LOCAL_DF );
bool bGlobalIDF = true;
ARRAY_FOREACH_COND ( i, m_dLocal, bGlobalIDF )
{
auto pDesc = GetServed( m_dLocal[i].m_sName );
bGlobalIDF = ( pDesc && !pDesc->m_sGlobalIDFPath.IsEmpty () );
}
// bail out if all the indexes have global idf set
if ( bGlobalIDF )
return;
bool bOnlyNoneRanker = true;
bool bOnlyFullScan = true;
bool bHasLocalDF = false;
for ( const CSphQuery & tQuery : m_dNQueries )
{
bOnlyFullScan &= tQuery.m_sQuery.IsEmpty();
bHasLocalDF |= tQuery.m_bLocalDF.value_or ( false );
if ( !tQuery.m_sQuery.IsEmpty() && tQuery.m_bLocalDF.value_or ( false ) )
bOnlyNoneRanker &= ( tQuery.m_eRanker==SPH_RANK_NONE );
}
// bail out when the queries are all full-scan, all ranker=none, or none requests local_idf
if ( bOnlyFullScan || bOnlyNoneRanker || !bHasLocalDF )
return;
CSphVector<char> dQuery ( 512 );
dQuery.Resize ( 0 );
for ( const CSphQuery & tQuery : m_dNQueries )
{
if ( tQuery.m_sQuery.IsEmpty() || !tQuery.m_bLocalDF.value_or ( false ) || tQuery.m_eRanker==SPH_RANK_NONE )
continue;
int iLen = tQuery.m_sQuery.Length();
auto * pDst = dQuery.AddN ( iLen + 1 );
memcpy ( pDst, tQuery.m_sQuery.cstr(), iLen );
dQuery.Last() = ' '; // queries delimiter
}
// bail out on empty queries
if ( !dQuery.GetLength() )
return;
dQuery.Add ( '\0' );
// order indexes by settings
CSphVector<IndexSettings_t> dLocal ( m_dLocal.GetLength() );
dLocal.Resize ( 0 );
ARRAY_FOREACH ( i, m_dLocal )
{
dLocal.Add();
dLocal.Last().m_iLocal = i;
// TODO: cache settingsFNV on index load
// FIXME!!! no need to count dictionary hash
RIdx_c pIndex ( m_dAcquired.Get ( m_dLocal[i].m_sName ) );
dLocal.Last().m_uHash = pIndex->GetTokenizer()->GetSettingsFNV() ^ pIndex->GetDictionary()->GetSettingsFNV();
}
dLocal.Sort ( bind ( &IndexSettings_t::m_uHash ) );
// gather per-term docs count
CSphVector < CSphKeywordInfo > dKeywords;
ARRAY_FOREACH ( i, dLocal )
{
int iLocalIndex = dLocal[i].m_iLocal;
RIdx_c pIndex ( m_dAcquired.Get ( m_dLocal[iLocalIndex].m_sName ) );
m_iTotalDocs += pIndex->GetStats().m_iTotalDocuments;
if ( i && dLocal[i].m_uHash==dLocal[i-1].m_uHash )
{
dKeywords.Apply ( [] ( CSphKeywordInfo & tKw ) { tKw.m_iDocs = 0; } );
// no need to tokenize the query, just fill in the docs count
pIndex->FillKeywords ( dKeywords );
} else
{
GetKeywordsSettings_t tSettings;
tSettings.m_bStats = true;
dKeywords.Resize ( 0 );
pIndex->GetKeywords ( dKeywords, dQuery.Begin(), tSettings, NULL );
// FIXME!!! move duplicate removal to GetKeywords to do less QWord setup and dict searching
// custom uniq - get rid of word duplicates
dKeywords.Sort ( bind ( &CSphKeywordInfo::m_sNormalized ) );
if ( dKeywords.GetLength()>1 )
{
int iSrc = 1, iDst = 1;
while ( iSrc<dKeywords.GetLength() )
{
if ( dKeywords[iDst-1].m_sNormalized==dKeywords[iSrc].m_sNormalized )
iSrc++;
else
{
Swap ( dKeywords[iDst], dKeywords[iSrc] );
iDst++;
iSrc++;
}
}
dKeywords.Resize ( iDst );
}
}
for ( auto& tKw: dKeywords )
{
int64_t * pDocs = m_hLocalDocs ( tKw.m_sNormalized );
if ( pDocs )
*pDocs += tKw.m_iDocs;
else
m_hLocalDocs.Add ( tKw.m_iDocs, tKw.m_sNormalized );
}
}
m_bGotLocalDF = true;
}
static int GetIndexWeight ( const CSphString& sName, const CSphVector<CSphNamedInt> & dIndexWeights, int iDefaultWeight )
{
for ( auto& dWeight : dIndexWeights )
if ( dWeight.first==sName )
return dWeight.second;
// distributed index adds {'*', weight} to all agents in case it got custom weight
if ( dIndexWeights.GetLength() && dIndexWeights.Last().first=="*" )
return dIndexWeights[0].second;
return iDefaultWeight;
}
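// rough index 'mass' used to order parallel searches: disk chunk count dominates,
// then non-resident mapped size, then RAM use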
uint64_t CalculateMass ( const CSphIndexStatus & dStats )
{
auto iOvermapped = dStats.m_iMapped-dStats.m_iMappedResident;
if ( iOvermapped<0 ) // it could be negative since resident is rounded up to page edge
iOvermapped = 0;
return 1000000 * dStats.m_iNumChunks
+ 10 * iOvermapped
+ dStats.m_iRamUse;
}
static uint64_t GetIndexMass ( const CSphString & sName )
{
return ServedIndex_c::GetIndexMass ( GetServed ( sName ) );
}
// declared to be used in ParseSysVar
void HandleMysqlShowThreads ( RowBuffer_i & tOut, const SqlStmt_t * pStmt );
void HandleMysqlShowTables ( RowBuffer_i & tOut, const SqlStmt_t * pStmt );
void HandleShowSessions ( RowBuffer_i& tOut, const SqlStmt_t* pStmt );
void HandleMysqlDescribe ( RowBuffer_i & tOut, const SqlStmt_t * pStmt );
void HandleSelectIndexStatus ( RowBuffer_i & tOut, const SqlStmt_t * pStmt );
void HandleSelectFiles ( RowBuffer_i & tOut, const SqlStmt_t * pStmt );
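// serve selects from @@system.* pseudo-tables by wrapping the corresponding handler into a dynamic index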
bool SearchHandler_c::ParseSysVar ()
{
const auto& sVar = m_dLocal.First().m_sName;
const auto & dSubkeys = m_dNQueries.First ().m_dStringSubkeys;
if ( sVar=="@@system" )
{
if ( !dSubkeys.IsEmpty () )
{
bool bSchema = ( dSubkeys.Last ()==".@table" );
bool bValid = true;
TableFeeder_fn fnFeed;
if ( dSubkeys[0]==".threads" ) // select .. from @@system.threads
{
if ( m_pStmt->m_sThreadFormat.IsEmpty() ) // override format to show all columns by default
m_pStmt->m_sThreadFormat="all";
fnFeed = [this] ( RowBuffer_i * pBuf ) { HandleMysqlShowThreads ( *pBuf, m_pStmt ); };
}
else if ( dSubkeys[0]==".tables" ) // select .. from @@system.tables
{
fnFeed = [this] ( RowBuffer_i * pBuf ) { HandleMysqlShowTables ( *pBuf, m_pStmt ); };
}
else if ( dSubkeys[0]==".tasks" ) // select .. from @@system.tasks
{
fnFeed = [] ( RowBuffer_i * pBuf ) { HandleTasks ( *pBuf ); };
}
else if ( dSubkeys[0]==".sched" ) // select .. from @@system.sched
{
fnFeed = [] ( RowBuffer_i * pBuf ) { HandleSched ( *pBuf ); };
} else if ( dSubkeys[0] == ".sessions" ) // select .. from @@system.sessions
{
fnFeed = [this] ( RowBuffer_i* pBuf ) { HandleShowSessions ( *pBuf, m_pStmt ); };
}
else
bValid = false;
if ( bValid )
{
cServedIndexRefPtr_c pIndex;
if ( bSchema )
{
m_dLocal.First ().m_sName.SetSprintf( "@@system.%s.@table", dSubkeys[0].cstr() );
pIndex = MakeDynamicIndexSchema ( std::move ( fnFeed ) );
} else {
m_dLocal.First ().m_sName.SetSprintf ( "@@system.%s", dSubkeys[0].cstr () );
pIndex = MakeDynamicIndex ( std::move ( fnFeed ) );
}
m_dAcquired.AddIndex ( m_dLocal.First ().m_sName, std::move (pIndex) );
return true;
}
}
}
m_sError << "no such variable " << sVar;
dSubkeys.for_each ( [this] ( const auto& s ) { m_sError << s; } );
return false;
}
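// serve selects from per-index pseudo-tables (.@table, .@status, .@files) via a dynamic index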
bool SearchHandler_c::ParseIdxSubkeys ()
{
const auto & sVar = m_dLocal.First ().m_sName;
const auto & dSubkeys = m_dNQueries.First ().m_dStringSubkeys;
assert ( !dSubkeys.IsEmpty () );
bool bSchema = ( dSubkeys.GetLength()>1 && dSubkeys.Last ()==".@table" );
TableFeeder_fn fnFeed;
if ( dSubkeys[0]==".@table" ) // select .. idx.table
fnFeed = [this] ( RowBuffer_i * pBuf ) { HandleMysqlDescribe ( *pBuf, m_pStmt ); };
else if ( dSubkeys[0]==".@status" ) // select .. idx.status
fnFeed = [this] ( RowBuffer_i * pBuf ) { HandleSelectIndexStatus ( *pBuf, m_pStmt ); };
else if ( dSubkeys[0]==".@files" ) // select .. from idx.files
fnFeed = [this] ( RowBuffer_i * pBuf ) { HandleSelectFiles ( *pBuf, m_pStmt ); };
else
{
m_sError << "No such table " << sVar;
dSubkeys.for_each ([this] (const auto& s) { m_sError << s;});
return false;
}
cServedIndexRefPtr_c pIndex;
if ( bSchema )
{
m_dLocal.First ().m_sName.SetSprintf ( "%s%s.@table", sVar.cstr (), dSubkeys[0].cstr () );
pIndex = MakeDynamicIndexSchema ( std::move ( fnFeed ) );
} else
{
m_dLocal.First ().m_sName.SetSprintf ( "%s%s", sVar.cstr (), dSubkeys[0].cstr () );
pIndex = MakeDynamicIndex ( std::move ( fnFeed ) );
}
m_dAcquired.AddIndex ( m_dLocal.First().m_sName, std::move ( pIndex ) );
return true;
}
////////////////////////////////////////////////////////////////
// check for single-query, multi-queue optimization possibility
////////////////////////////////////////////////////////////////
bool SearchHandler_c::CheckMultiQuery() const
{
const int iQueries = m_dNQueries.GetLength();
if ( iQueries<=1 )
return false;
const CSphQuery & qFirst = m_dNQueries.First();
auto dQueries = m_dNQueries.Slice ( 1 );
// queries over special indexes such as status/meta are not capable of multi-query
if ( !qFirst.m_dStringSubkeys.IsEmpty() )
return false;
for ( const CSphQuery & qCheck : dQueries )
{
// these parameters must be the same
if (
( qCheck.m_sRawQuery!=qFirst.m_sRawQuery ) || // query string
( qCheck.m_dWeights.GetLength ()!=qFirst.m_dWeights.GetLength () ) || // weights count
( qCheck.m_dWeights.GetLength () && memcmp ( qCheck.m_dWeights.Begin (), qFirst.m_dWeights.Begin (),
sizeof ( qCheck.m_dWeights[0] ) * qCheck.m_dWeights.GetLength () ) ) || // weights
( qCheck.m_eMode!=qFirst.m_eMode ) || // search mode
( qCheck.m_eRanker!=qFirst.m_eRanker ) || // ranking mode
( qCheck.m_dFilters.GetLength ()!=qFirst.m_dFilters.GetLength () ) || // attr filters count
( qCheck.m_dFilterTree.GetLength ()!=qFirst.m_dFilterTree.GetLength () ) ||
( qCheck.m_iCutoff!=qFirst.m_iCutoff ) || // cutoff
( qCheck.m_eSort==SPH_SORT_EXPR && qFirst.m_eSort==SPH_SORT_EXPR && qCheck.m_sSortBy!=qFirst.m_sSortBy )
|| // sort expressions
( qCheck.m_bGeoAnchor!=qFirst.m_bGeoAnchor ) || // geodist expression
( qCheck.m_bGeoAnchor && qFirst.m_bGeoAnchor
&& ( qCheck.m_fGeoLatitude!=qFirst.m_fGeoLatitude
|| qCheck.m_fGeoLongitude!=qFirst.m_fGeoLongitude ) ) ) // some geodist cases
return false;
// filters must be the same too
assert ( qCheck.m_dFilters.GetLength ()==qFirst.m_dFilters.GetLength () );
assert ( qCheck.m_dFilterTree.GetLength ()==qFirst.m_dFilterTree.GetLength () );
ARRAY_FOREACH ( i, qCheck.m_dFilters )
{
if ( qCheck.m_dFilters[i]!=qFirst.m_dFilters[i] )
return false;
}
ARRAY_FOREACH ( i, qCheck.m_dFilterTree )
{
if ( qCheck.m_dFilterTree[i]!=qFirst.m_dFilterTree[i] )
return false;
}
}
return true;
}
// lock the local indexes invoked in the query
// fails if an index is absent and that is not allowed
bool SearchHandler_c::AcquireInvokedIndexes()
{
// add indexes required by JOIN
// but don't try to acquire local indexes if query is issued only for remote distributed
if ( m_dLocal.GetLength() )
{
StringBuilder_c sFailed (", ");
for ( const auto & tQuery : m_dNQueries )
if ( !tQuery.m_sJoinIdx.IsEmpty() && !m_dAcquired.AddUniqIndex ( tQuery.m_sJoinIdx ) )
sFailed << tQuery.m_sJoinIdx;
if ( !sFailed.IsEmpty() )
{
m_sError << "unknown local table(s) '" << sFailed << "' in search request";
return false;
}
}
// if nonexistent indexes are allowed, take the short path and just drop them
if ( m_dNQueries.First().m_bIgnoreNonexistentIndexes )
{
ARRAY_FOREACH ( i, m_dLocal )
if ( !m_dAcquired.AddUniqIndex ( m_dLocal[i].m_sName ) )
m_dLocal.Remove ( i-- );
return true;
}
// build the list of non-existent indexes
StringBuilder_c sFailed (", ");
for ( const auto & dLocal : m_dLocal )
if ( !m_dAcquired.AddUniqIndex ( dLocal.m_sName ) )
sFailed << dLocal.m_sName;
// no absent indexes, voila!
if ( sFailed.IsEmpty ())
return true;
// report failed
m_sError << "unknown local table(s) '" << sFailed << "' in search request";
return false;
}
// dedupe dLocals and copy only the unique part into m_dLocal
void SearchHandler_c::UniqLocals ( VecTraits_T<LocalIndex_t> & dLocals )
{
int iLen = dLocals.GetLength ();
if ( !iLen )
return;
CSphVector<int> dOrder;
dOrder.Resize ( dLocals.GetLength() );
dOrder.FillSeq();
dOrder.Sort ( Lesser ( [&dLocals] ( int a, int b )
{
return ( dLocals[a].m_sName<dLocals[b].m_sName )
|| ( dLocals[a].m_sName==dLocals[b].m_sName && dLocals[a].m_iOrderTag>dLocals[b].m_iOrderTag );
}));
int iSrc = 1, iDst = 1;
while ( iSrc<iLen )
{
if ( dLocals[dOrder[iDst-1]].m_sName==dLocals[dOrder[iSrc]].m_sName )
++iSrc;
else
dOrder[iDst++] = dOrder[iSrc++];
}
dOrder.Resize ( iDst );
m_dLocal.Resize ( iDst );
ARRAY_FOREACH ( i, dOrder )
m_dLocal[i] = std::move ( dLocals[dOrder[i]] );
}
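// distribute the subset wall/CPU time over per-query results and per-index stats;
// CPU time spent in child threads and in agents is accounted separately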
void SearchHandler_c::CalcTimeStats ( int64_t tmCpu, int64_t tmSubset, const CSphVector<DistrServedByAgent_t> & dDistrServedByAgent )
{
// in multi-queue case (1 actual call per N queries), just divide overall query time evenly
// otherwise (N calls per N queries), divide common query time overheads evenly
const int iQueries = m_dNQueries.GetLength();
if ( m_bMultiQueue )
{
for ( auto & dResult : m_dNAggrResults )
{
dResult.m_iQueryTime = (int)( tmSubset/1000/iQueries );
dResult.m_iRealQueryTime = (int)( tmSubset/1000/iQueries );
dResult.m_iCpuTime = tmCpu/iQueries;
}
return;
}
int64_t tmAccountedWall = 0;
int64_t tmAccountedCpu = 0;
for ( const auto & dResult : m_dNAggrResults )
{
tmAccountedWall += dResult.m_iQueryTime*1000;
assert ( ( dResult.m_iCpuTime==0 && dResult.m_iAgentCpuTime==0 ) || // all work was done in this thread
( dResult.m_iCpuTime>0 && dResult.m_iAgentCpuTime==0 ) || // children threads work
( dResult.m_iAgentCpuTime>0 && dResult.m_iCpuTime==0 ) ); // agents work
tmAccountedCpu += dResult.m_iCpuTime;
tmAccountedCpu += dResult.m_iAgentCpuTime;
}
// whether we had work done in children threads (dist_threads>1) or in agents
bool bExternalWork = tmAccountedCpu!=0;
int64_t tmDeltaWall = ( tmSubset - tmAccountedWall ) / iQueries;
for ( auto & dResult : m_dNAggrResults )
{
dResult.m_iQueryTime += (int)(tmDeltaWall/1000);
dResult.m_iRealQueryTime = (int)( tmSubset/1000/iQueries );
dResult.m_iCpuTime = tmCpu/iQueries;
if ( bExternalWork )
dResult.m_iCpuTime += tmAccountedCpu;
}
// don't forget to add this to stats
if ( bExternalWork )
tmCpu += tmAccountedCpu;
// correct per-index stats from agents
int iTotalSuccesses = 0;
for ( const auto & dResult : m_dNAggrResults )
iTotalSuccesses += dResult.m_iSuccesses;
if ( !iTotalSuccesses )
return;
int64_t tmDelta = tmSubset - tmAccountedWall;
auto nValidDistrIndexes = dDistrServedByAgent.count_of ( [] ( auto& t ) { return t.m_dStats.any_of ( [] ( auto& i ) { return i.m_iSuccesses; } ); } );
int64_t nDistrDivider = iTotalSuccesses * nValidDistrIndexes * 1000;
if ( nDistrDivider )
for ( auto &tDistrStat : dDistrServedByAgent )
for ( QueryStat_t& tStat : tDistrStat.m_dStats )
{
auto tmDeltaWallAgent = tmDelta * tStat.m_iSuccesses / nDistrDivider;
tStat.m_uQueryTime += (int)tmDeltaWallAgent;
}
auto nValidLocalIndexes = m_dQueryIndexStats.count_of ( [] ( auto& t ) { return t.m_dStats.any_of ( [] ( auto& i ) { return i.m_iSuccesses; } ); } );
int64_t nLocalDivider = iTotalSuccesses * nValidLocalIndexes * 1000;
if ( nLocalDivider )
for ( auto &dQueryIndexStat : m_dQueryIndexStats )
for ( QueryStat_t& tStat : dQueryIndexStat.m_dStats )
{
int64_t tmDeltaWallLocal = tmDelta * tStat.m_iSuccesses / nLocalDivider;
tStat.m_uQueryTime += (int)tmDeltaWallLocal;
}
}
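// add per-query successes, found rows, and timings to the stats of the served local indexes
// and of the distributed indexes that own them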
void SearchHandler_c::CalcPerIndexStats ( const CSphVector<DistrServedByAgent_t> & dDistrServedByAgent ) const
{
const int iQueries = m_dNQueries.GetLength();
// calculate per-index stats
ARRAY_FOREACH ( iLocal, m_dLocal )
{
const auto& pServed = m_dAcquired.Get ( m_dLocal[iLocal].m_sName );
for ( int iQuery=0; iQuery<iQueries; ++iQuery )
{
QueryStat_t & tStat = m_dQueryIndexStats[iLocal].m_dStats[iQuery];
if ( !tStat.m_iSuccesses )
continue;
pServed->m_pStats->AddQueryStat ( tStat.m_uFoundRows, tStat.m_uQueryTime );
for ( auto &tDistr : dDistrServedByAgent )
{
if ( tDistr.m_dLocalNames.Contains ( m_dLocal[iLocal].m_sName ) )
{
tDistr.m_dStats[iQuery].m_uQueryTime += tStat.m_uQueryTime;
tDistr.m_dStats[iQuery].m_uFoundRows += tStat.m_uFoundRows;
++tDistr.m_dStats[iQuery].m_iSuccesses;
}
}
}
}
for ( auto &tDistr : dDistrServedByAgent )
{
auto pServedDistIndex = GetDistr ( tDistr.m_sIndex );
if ( pServedDistIndex )
for ( int iQuery=0; iQuery<iQueries; ++iQuery )
{
auto & tStat = tDistr.m_dStats[iQuery];
if ( !tStat.m_iSuccesses )
continue;
pServedDistIndex->m_tStats.AddQueryStat ( tStat.m_uFoundRows, tStat.m_uQueryTime );
}
}
}
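// bump the daemon-wide counters: total queries, wall/CPU time, distributed timings, and disk I/O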
void SearchHandler_c::CalcGlobalStats ( int64_t tmCpu, int64_t tmSubset, int64_t tmLocal, const CSphIOStats & tIO, const VecRefPtrsAgentConn_t & dRemotes ) const
{
auto & g_tStats = gStats ();
g_tStats.m_iQueries.fetch_add ( m_dNQueries.GetLength (), std::memory_order_relaxed );
g_tStats.m_iQueryTime.fetch_add ( tmSubset, std::memory_order_relaxed );
g_tStats.m_iQueryCpuTime.fetch_add ( tmCpu, std::memory_order_relaxed );
if ( dRemotes.GetLength() )
{
int64_t tmWait = 0;
for ( const AgentConn_t * pAgent : dRemotes )
tmWait += pAgent->m_iWaited;
// do *not* count queries to dist indexes w/o actual remote agents
g_tStats.m_iDistQueries.fetch_add ( 1, std::memory_order_relaxed );
g_tStats.m_iDistWallTime.fetch_add ( tmSubset, std::memory_order_relaxed );
g_tStats.m_iDistLocalTime.fetch_add ( tmLocal, std::memory_order_relaxed );
g_tStats.m_iDistWaitTime.fetch_add ( tmWait, std::memory_order_relaxed );
}
g_tStats.m_iDiskReads.fetch_add ( tIO.m_iReadOps, std::memory_order_relaxed );
g_tStats.m_iDiskReadTime.fetch_add ( tIO.m_iReadTime, std::memory_order_relaxed );
g_tStats.m_iDiskReadBytes.fetch_add ( tIO.m_iReadBytes, std::memory_order_relaxed );
}
static CSphVector<LocalIndex_t> CollectAllLocalIndexes ( const CSphVector<CSphNamedInt> & dIndexWeights )
{
CSphVector<LocalIndex_t> dIndexes;
int iOrderTag = 0;
// search through all local indexes
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( auto& tIt : *hLocal )
{
if ( !tIt.second ) // fixme! should never be...
continue;
auto & dLocal = dIndexes.Add ();
dLocal.m_sName = tIt.first;
dLocal.m_iOrderTag = iOrderTag++;
dLocal.m_iWeight = GetIndexWeight ( tIt.first, dIndexWeights, 1 );
dLocal.m_iMass = ServedIndex_c::GetIndexMass ( tIt.second );
}
return dIndexes;
}
// returns true = real indexes, false = sysvar (i.e. the only 'index' is named starting with @@)
bool SearchHandler_c::BuildIndexList ( int & iDivideLimits, VecRefPtrsAgentConn_t & dRemotes, CSphVector<DistrServedByAgent_t> & dDistrServedByAgent )
{
const CSphQuery & tQuery = m_dNQueries.First ();
if ( tQuery.m_sIndexes=="*" )
{
// they're all local, build the list
m_dLocal = CollectAllLocalIndexes ( tQuery.m_dIndexWeights );
return true;
}
m_dLocal.Reset ();
int iOrderTag = 0;
bool bSysVar = tQuery.m_sIndexes.Begins ( "@@" );
// search through specified local indexes
StrVec_t dIdxNames;
if ( bSysVar )
dIdxNames.Add ( tQuery.m_sIndexes );
else
ParseIndexList ( tQuery.m_sIndexes, dIdxNames );
const int iQueries = m_dNQueries.GetLength ();
CSphVector<LocalIndex_t> dLocals;
int iDistCount = 0;
bool bDivideRemote = false;
bool bHasLocalsAgents = false;
for ( const auto& sIndex : dIdxNames )
{
auto pDist = GetDistr ( sIndex );
if ( !pDist )
{
auto &dLocal = dLocals.Add ();
dLocal.m_sName = sIndex;
dLocal.m_iOrderTag = iOrderTag++;
dLocal.m_iWeight = GetIndexWeight ( sIndex, tQuery.m_dIndexWeights, 1 );
dLocal.m_iMass = GetIndexMass ( sIndex );
} else
{
++iDistCount;
int iWeight = GetIndexWeight ( sIndex, tQuery.m_dIndexWeights, -1 );
auto & tDistrStat = dDistrServedByAgent.Add();
tDistrStat.m_sIndex = sIndex;
tDistrStat.m_dStats.Resize ( iQueries );
tDistrStat.m_dStats.ZeroVec();
for ( const auto& pAgent : pDist->m_dAgents )
{
tDistrStat.m_dAgentIds.Add ( dRemotes.GetLength() );
auto * pConn = new AgentConn_t;
pConn->SetMultiAgent ( pAgent );
pConn->m_iStoreTag = iOrderTag++;
pConn->m_iWeight = iWeight;
pConn->m_iMyConnectTimeoutMs = pDist->GetAgentConnectTimeoutMs();
pConn->m_iMyQueryTimeoutMs = ( tQuery.m_iAgentQueryTimeoutMs!=DEFAULT_QUERY_TIMEOUT ? tQuery.m_iAgentQueryTimeoutMs : pDist->GetAgentQueryTimeoutMs() );
dRemotes.Add ( pConn );
}
ARRAY_CONSTFOREACH ( j, pDist->m_dLocal )
{
const CSphString& sLocalAgent = pDist->m_dLocal[j];
tDistrStat.m_dLocalNames.Add ( sLocalAgent );
auto &dLocal = dLocals.Add ();
dLocal.m_sName = sLocalAgent;
dLocal.m_iOrderTag = iOrderTag++;
if ( iWeight!=-1 )
dLocal.m_iWeight = iWeight;
dLocal.m_iMass = GetIndexMass ( sLocalAgent );
dLocal.m_sParentIndex = sIndex;
bHasLocalsAgents = true;
}
bDivideRemote |= pDist->m_bDivideRemoteRanges;
}
}
// set remote divider
if ( bDivideRemote )
{
if ( iDistCount==1 )
iDivideLimits = dRemotes.GetLength();
else
{
for ( auto& dResult : m_dNAggrResults )
dResult.m_sWarning.SetSprintf ( "distributed multi-table query '%s' doesn't support divide_remote_ranges", tQuery.m_sIndexes.cstr() );
}
}
// eliminate local dupes that come from distributed indexes
if ( bHasLocalsAgents )
UniqLocals ( dLocals );
else
m_dLocal.SwapData ( dLocals );
return !bSysVar;
}
// generate a warning about slow full-text expansion for queries where
// the count of merged terms is less than the count of expanded terms
// and the query took at least 100ms (and at least query_log_min_msec, if set)
static void CheckExpansion ( CSphQueryResultMeta & tMeta )
{
if ( tMeta.m_hWordStats.IsEmpty() || !tMeta.m_tExpansionStats.m_iTerms )
return;
if ( tMeta.m_tExpansionStats.m_iMerged>=tMeta.m_tExpansionStats.m_iTerms )
return;
if ( tMeta.m_iQueryTime<100 || ( g_iQueryLogMinMs>0 && tMeta.m_iQueryTime<g_iQueryLogMinMs ) )
return;
int iTotal = tMeta.m_tExpansionStats.m_iMerged + tMeta.m_tExpansionStats.m_iTerms;
int iMerged = (int)( float(tMeta.m_tExpansionStats.m_iMerged) * 100.0f / iTotal );
StringBuilder_c sBuf;
// note: the message should not end with a dot and should not start with a capital letter, because several messages can be joined with a '; ' delimiter.
sBuf.Appendf ( "current merge of expanded terms is %d%%, with a total of %d. Read manual about 'expansion_merge_threshold_docs/hits'", iMerged, iTotal );
if ( !tMeta.m_sWarning.IsEmpty() )
sBuf.Appendf ( "; %s", tMeta.m_sWarning.cstr() );
sBuf.MoveTo ( tMeta.m_sWarning );
}
// query info - render query into the view
struct QueryInfo_t : public TaskInfo_t
{
DECLARE_RENDER( QueryInfo_t );
// actually it is 'virtually hazard'. We don't care about the query ptr itself; however, the dtr of SearchHandler_c
// later works with refs to members of its m_dQueries and retires the whole vec.
std::atomic<const CSphQuery *> m_pHazardQuery;
};
DEFINE_RENDER ( QueryInfo_t )
{
auto & tInfo = *(QueryInfo_t *) pSrc;
dDst.m_sChain << "Query ";
hazard::Guard_c tGuard;
auto pQuery = tGuard.Protect ( tInfo.m_pHazardQuery );
if ( pQuery && session::GetProto()!=Proto_e::MYSQL41 ) // cheat: for the mysql proto the query is not used, so don't copy it
dDst.m_pQuery = std::make_unique<CSphQuery> ( *pQuery );
}
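// in a facet batch, propagate the first reported error to the facet head and the other result sets that failed silently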
static void FillupFacetError ( int iQueries, const CSphVector<CSphQuery> & dQueries, VecTraits_T<AggrResult_t> & dAggrResults )
{
if ( iQueries>1 && !dAggrResults.Begin()->m_iSuccesses && dAggrResults.Begin()->m_sError.IsEmpty() && dQueries.Begin()->m_bFacetHead )
{
const CSphString * pError = nullptr;
for ( int iRes=0; iRes<iQueries; ++iRes )
{
const AggrResult_t & tRes = dAggrResults[iRes];
if ( !tRes.m_iSuccesses && !tRes.m_sError.IsEmpty() )
{
pError = &tRes.m_sError;
break;
}
}
if ( !pError )
return;
for ( int iRes=0; iRes<iQueries; ++iRes )
{
AggrResult_t & tRes = dAggrResults[iRes];
if ( !tRes.m_sError.IsEmpty() )
break;
tRes.m_sError = *pError;
}
}
}
// one or more queries against one and same set of indexes
void SearchHandler_c::RunSubset ( int iStart, int iEnd )
{
int iQueries = iEnd - iStart;
m_dNQueries = m_dQueries.Slice ( iStart, iQueries );
m_dNAggrResults = m_dAggrResults.Slice ( iStart, iQueries );
m_dNResults = m_dResults.Slice ( iStart, iQueries );
m_dNFailuresSet = m_dFailuresSet.Slice ( iStart, iQueries );
// we have our own scoped context here
auto pQueryInfo = new QueryInfo_t;
pQueryInfo->m_pHazardQuery.store ( m_dNQueries.begin(), std::memory_order_release );
ScopedInfo_T<QueryInfo_t> pTlsQueryInfo ( pQueryInfo );
// all my stats
int64_t tmSubset = -sphMicroTimer();
int64_t tmLocal = 0;
int64_t tmCpu = -sphTaskCpuTimer ();
CSphScopedProfile tProf ( m_pProfile, SPH_QSTATE_UNKNOWN );
// prepare for descent
const CSphQuery & tFirst = m_dNQueries.First();
m_dNAggrResults.Apply ( [] ( AggrResult_t & r ) { r.m_iSuccesses = 0; } );
if ( iQueries==1 && m_pProfile )
{
m_dNAggrResults.First().m_pProfile = m_pProfile;
m_tHook.SetProfiler ( m_pProfile );
}
// check for facets
m_bFacetQueue = iQueries>1;
for ( int iCheck = 1; iCheck<m_dNQueries.GetLength () && m_bFacetQueue; ++iCheck )
if ( !m_dNQueries[iCheck].m_bFacet )
m_bFacetQueue = false;
m_bMultiQueue = m_bFacetQueue || CheckMultiQuery();
////////////////////////////
// build local indexes list
////////////////////////////
VecRefPtrsAgentConn_t dRemotes;
CSphVector<DistrServedByAgent_t> dDistrServedByAgent;
int iDivideLimits = 1;
auto fnError = AtScopeExit ( [this]()
{
if ( !m_sError.IsEmpty() )
m_dNAggrResults.for_each ( [this] ( auto& r ) { r.m_sError = (CSphString) m_sError; } );
});
if ( BuildIndexList ( iDivideLimits, dRemotes, dDistrServedByAgent ) )
{
// process query to meta, as myindex.status, etc.
if ( !tFirst.m_dStringSubkeys.IsEmpty () )
{
// apply the subkeys if possible, bail out otherwise
if ( !ParseIdxSubkeys () )
return;
} else if ( !AcquireInvokedIndexes () ) // usual query processing
return;
} else
{
// process query to @@*, as @@system.threads, etc.
if ( !ParseSysVar () )
return;
}
// at this point m_dLocal contains list of valid local indexes (i.e., existing ones),
// and these indexes are also rlocked and available by calling m_dAcquired.Get()
// sanity check
if ( dRemotes.IsEmpty() && m_dLocal.IsEmpty() )
{
m_sError << "no enabled tables to search";
return;
}
if ( m_dNQueries[0].m_iLimit==-1 && ( !dRemotes.IsEmpty () || m_dLocal.GetLength ()>1 ) )
{
m_sError << "only one local table allowed in streaming select";
return;
}
// select lists must have no expressions
if ( m_bMultiQueue )
m_bMultiQueue = AllowsMulti ();
assert ( !m_bFacetQueue || AllowsMulti () );
if ( !m_bMultiQueue )
m_bFacetQueue = false;
///////////////////////////////////////////////////////////
// main query loop (with multiple retries for distributed)
///////////////////////////////////////////////////////////
// connect to remote agents and query them, if required
std::unique_ptr<SearchRequestBuilder_c> tReqBuilder;
CSphRefcountedPtr<RemoteAgentsObserver_i> tReporter { nullptr };
std::unique_ptr<ReplyParser_i> tParser;
if ( !dRemotes.IsEmpty() )
{
SwitchProfile(m_pProfile, SPH_QSTATE_DIST_CONNECT);
tReqBuilder = std::make_unique<SearchRequestBuilder_c> ( m_dNQueries, iDivideLimits );
tParser = std::make_unique<SearchReplyParser_c> ( iQueries );
tReporter = GetObserver();
// run remote queries. tReporter will tell us when they're finished.
// also blackholes will be removed from this flow of remotes.
ScheduleDistrJobs ( dRemotes, tReqBuilder.get (),
tParser.get (),
tReporter, tFirst.m_iRetryCount, tFirst.m_iRetryDelay );
}
/////////////////////
// run local queries
//////////////////////
// while the remote queries are running, do local searches
if ( m_dLocal.GetLength() )
{
SetupLocalDF();
SwitchProfile ( m_pProfile, SPH_QSTATE_LOCAL_SEARCH );
m_bNeedDocIDs = m_dLocal.GetLength()+dRemotes.GetLength()>1;
tmLocal = -sphMicroTimer();
tmCpu -= sphTaskCpuTimer ();
RunLocalSearches();
tmCpu += sphTaskCpuTimer ();
tmLocal += sphMicroTimer();
}
///////////////////////
// poll remote queries
///////////////////////
if ( !dRemotes.IsEmpty() )
{
SwitchProfile ( m_pProfile, SPH_QSTATE_DIST_WAIT );
bool bDistDone = false;
while ( !bDistDone )
{
// don't forget to check incoming replies after send was over
bDistDone = tReporter->IsDone();
if ( !bDistDone )
tReporter->WaitChanges (); /// wait for one or more remote queries to complete. Note! Maybe a context switch!
ARRAY_FOREACH ( iAgent, dRemotes )
{
AgentConn_t * pAgent = dRemotes[iAgent];
assert ( !pAgent->IsBlackhole () ); // there must not be any blackhole here.
if ( !pAgent->m_bSuccess )
continue;
sphLogDebugv ( "agent %d, state %s, order %d, sock %d", iAgent, pAgent->StateName(), pAgent->m_iStoreTag, pAgent->m_iSock );
DistrServedByAgent_t * pDistr = nullptr;
for ( auto &tDistr : dDistrServedByAgent )
if ( tDistr.m_dAgentIds.Contains ( iAgent ) )
{
pDistr = &tDistr;
break;
}
assert ( pDistr );
// merge this agent's results
for ( int iRes = 0; iRes<iQueries; ++iRes )
{
auto pResult = ( cSearchResult * ) pAgent->m_pResult.get ();
if ( !pResult )
continue;
auto &tRemoteResult = pResult->m_dResults[iRes];
// copy errors or warnings
if ( !tRemoteResult.m_sError.IsEmpty() )
m_dNFailuresSet[iRes].SubmitEx ( tFirst.m_sIndexes, nullptr,
"agent %s: remote query error: %s",
pAgent->m_tDesc.GetMyUrl().cstr(), tRemoteResult.m_sError.cstr() );
if ( !tRemoteResult.m_sWarning.IsEmpty() )
m_dNFailuresSet[iRes].SubmitEx ( tFirst.m_sIndexes, nullptr,
"agent %s: remote query warning: %s",
pAgent->m_tDesc.GetMyUrl().cstr(), tRemoteResult.m_sWarning.cstr() );
if ( tRemoteResult.m_iSuccesses<=0 )
continue;
AggrResult_t & tRes = m_dNAggrResults[iRes];
++tRes.m_iSuccesses;
assert ( tRemoteResult.m_dResults.GetLength() == 1 ); // by design remotes return one chunk
auto & dRemoteChunk = tRes.m_dResults.Add ();
::Swap ( dRemoteChunk, *tRemoteResult.m_dResults.begin () );
// note how we do NOT add per-index weight here
// merge this agent's stats
tRes.m_iTotalMatches += tRemoteResult.m_iTotalMatches;
tRes.m_bTotalMatchesApprox |= tRemoteResult.m_bTotalMatchesApprox;
tRes.m_iQueryTime += tRemoteResult.m_iQueryTime;
tRes.m_iAgentCpuTime += tRemoteResult.m_iCpuTime;
tRes.m_tAgentIOStats.Add ( tRemoteResult.m_tIOStats );
tRes.m_iAgentPredictedTime += tRemoteResult.m_iPredictedTime;
tRes.m_iAgentFetchedDocs += tRemoteResult.m_iAgentFetchedDocs;
tRes.m_iAgentFetchedHits += tRemoteResult.m_iAgentFetchedHits;
tRes.m_iAgentFetchedSkips += tRemoteResult.m_iAgentFetchedSkips;
tRes.m_bHasPrediction |= ( m_dNQueries[iRes].m_iMaxPredictedMsec>0 );
if ( pDistr )
{
pDistr->m_dStats[iRes].m_uQueryTime += tRemoteResult.m_iQueryTime;
pDistr->m_dStats[iRes].m_uFoundRows += tRemoteResult.m_iTotalMatches;
++pDistr->m_dStats[iRes].m_iSuccesses;
}
// merge this agent's words
tRes.MergeWordStats ( tRemoteResult );
}
// dismissed
if ( pAgent->m_pResult )
pAgent->m_pResult->Reset ();
pAgent->m_bSuccess = false;
pAgent->m_sFailure = "";
}
} // while ( !bDistDone )
// submit failures from failed agents
// copy timings from all agents
for ( const AgentConn_t * pAgent : dRemotes )
{
assert ( !pAgent->IsBlackhole () ); // there must not be any blackhole here.
for ( int j=iStart; j<iEnd; ++j )
{
assert ( pAgent->m_iWall>=0 );
m_dAgentTimes[j].Add ( ( pAgent->m_iWall ) / ( 1000 * iQueries ) );
}
if ( !pAgent->m_bSuccess && !pAgent->m_sFailure.IsEmpty() )
for ( int j=0; j<iQueries; ++j )
m_dNFailuresSet[j].SubmitEx ( tFirst.m_sIndexes, nullptr, "agent %s: %s",
pAgent->m_tDesc.GetMyUrl().cstr(), pAgent->m_sFailure.cstr() );
}
}
/////////////////////
// merge all results
/////////////////////
SwitchProfile ( m_pProfile, SPH_QSTATE_AGGREGATE );
CSphIOStats tIO;
for ( int iRes=0; iRes<iQueries; ++iRes )
{
sph::StringSet hExtra;
for ( const CSphString & sExtra : m_dExtraSchema )
hExtra.Add ( sExtra );
AggrResult_t & tRes = m_dNAggrResults[iRes];
const CSphQuery & tQuery = m_dNQueries[iRes];
// minimize sorters needs these pointers
tIO.Add ( tRes.m_tIOStats );
// if there were no successful searches at all, this is an error
if ( !tRes.m_iSuccesses )
{
StringBuilder_c sFailures;
m_dNFailuresSet[iRes].BuildReport ( sFailures );
sFailures.MoveTo (tRes.m_sError);
continue;
}
if ( tRes.m_dResults.IsEmpty () ) // fixup. It is easier to have a single empty result than to check each time.
{
auto& tEmptyRes = tRes.m_dResults.Add ();
tEmptyRes.m_tSchema = tRes.m_tSchema;
}
// minimize schema and remove dupes
// assuming here ( tRes.m_tSchema==tRes.m_dSchemas[0] )
const CSphFilterSettings * pAggrFilter = nullptr;
if ( m_bMaster && !tQuery.m_tHaving.m_sAttrName.IsEmpty() )
pAggrFilter = &tQuery.m_tHaving;
const CSphVector<CSphQueryItem> & dItems = ( tQuery.m_dRefItems.GetLength() ? tQuery.m_dRefItems : tQuery.m_dItems );
if ( tRes.m_iSuccesses>1 || dItems.GetLength() || pAggrFilter )
{
if ( m_bMaster && tRes.m_iSuccesses && dItems.GetLength() && tQuery.m_sGroupBy.IsEmpty() && tRes.GetLength()==0 )
{
for ( auto& dItem : dItems )
{
if ( dItem.m_sExpr=="count(*)" || ( dItem.m_sExpr=="@distinct" ) )
tRes.m_dZeroCount.Add ( dItem.m_sAlias );
}
}
bool bOk = MinimizeAggrResult ( tRes, tQuery, !m_dLocal.IsEmpty(), hExtra, m_pProfile, pAggrFilter, m_bFederatedUser, m_bMaster );
if ( !bOk )
{
tRes.m_iSuccesses = 0;
continue;
}
} else if ( !tRes.m_dResults.IsEmpty() )
{
tRes.m_tSchema = tRes.m_dResults.First ().m_tSchema;
Debug ( tRes.m_bOneSchema = true; )
}
if ( !m_dNFailuresSet[iRes].IsEmpty() )
{
StringBuilder_c sFailures;
m_dNFailuresSet[iRes].BuildReport ( sFailures );
sFailures.MoveTo ( tRes.m_sWarning );
}
CheckExpansion ( tRes );
////////////
// finalize
////////////
tRes.m_iOffset = Max ( tQuery.m_iOffset, tQuery.m_iOuterOffset );
auto iLimit = ( tQuery.m_iOuterLimit ? tQuery.m_iOuterLimit : tQuery.m_iLimit );
tRes.m_iCount = Max ( Min ( iLimit, tRes.GetLength()-tRes.m_iOffset ), 0 );
tRes.m_iMatches = tRes.m_iCount;
for ( const auto & tLocal : m_dLocal )
tRes.m_dIndexNames.Add ( tLocal.m_sName );
}
// pop up facet error from one of the query to the front
FillupFacetError ( iQueries, m_dQueries, m_dNAggrResults );
/////////////////////////////////
// functions on a table argument
/////////////////////////////////
for ( int i=0; i<iQueries; ++i )
{
AggrResult_t & tRes = m_dNAggrResults[i];
auto& pTableFunc = m_dTables[iStart+i];
// FIXME! log such queries properly?
if ( pTableFunc )
{
SwitchProfile ( m_pProfile, SPH_QSTATE_TABLE_FUNC );
if ( !pTableFunc->Process ( &tRes, tRes.m_sError ) )
tRes.m_iSuccesses = 0;
}
}
/////////
// stats
/////////
tmSubset += sphMicroTimer();
tmCpu += sphTaskCpuTimer();
CalcTimeStats ( tmCpu, tmSubset, dDistrServedByAgent );
CalcPerIndexStats ( dDistrServedByAgent );
CalcGlobalStats ( tmCpu, tmSubset, tmLocal, tIO, dRemotes );
}
bool CheckCommandVersion ( WORD uVer, WORD uDaemonVersion, ISphOutputBuffer & tOut )
{
if ( ( uVer>>8)!=( uDaemonVersion>>8) )
{
SendErrorReply ( tOut, "major command version mismatch (expected v.%d.x, got v.%d.%d)",
uDaemonVersion>>8, uVer>>8, uVer&0xff );
return false;
}
if ( uVer>uDaemonVersion )
{
SendErrorReply ( tOut, "client version is higher than daemon version (client is v.%d.%d, daemon is v.%d.%d)",
uVer>>8, uVer&0xff, uDaemonVersion>>8, uDaemonVersion&0xff );
return false;
}
return true;
}
bool IsMaxedOut ()
{
if ( session::GetVip () )
return false;
if ( session::GetBuddy() )
return false;
if ( g_iThdQueueMax!=0 )
return GlobalWorkPool()->Works() > g_iThdQueueMax; // that is the "jobs_queue_size" param of searchd.conf, "work_queue_length" in 'show status', or "Queue:" in 'status'
if ( g_iMaxConnection!=0 )
return myinfo::CountClients() > g_iMaxConnection; // that is "max_connections" param of searchd.conf, "workers_clients" in 'show status', or "Clients:" in 'status'
return false;
}
bool IsReadOnly ()
{
return session::GetReadOnly();
}
bool sphCheckWeCanModify()
{
return !IsReadOnly();
}
bool sphCheckWeCanModify ( StmtErrorReporter_i & tOut )
{
if ( sphCheckWeCanModify() )
return true;
tOut.Error ( "connection is read-only");
return false;
}
bool sphCheckWeCanModify ( CSphString & sError )
{
if ( sphCheckWeCanModify() )
return true;
sError = "connection is read-only";
return false;
}
bool sphCheckWeCanModify ( RowBuffer_i & tOut )
{
if ( sphCheckWeCanModify() )
return true;
tOut.Error ( "connection is read-only" );
return false;
}
void HandleCommandSearch ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
MEMORY ( MEM_API_SEARCH );
if ( !CheckCommandVersion ( uVer, VER_COMMAND_SEARCH, tOut ) )
return;
const WORD MIN_VERSION = 0x119;
if ( uVer<MIN_VERSION )
{
SendErrorReply ( tOut, "client version is too old; upgrade your client (client is v.%d.%d, min is v.%d.%d)", uVer>>8, uVer&0xff, MIN_VERSION>>8, MIN_VERSION&0xff );
return;
}
int iMasterVer = tReq.GetInt();
if ( iMasterVer<0 || iMasterVer>VER_COMMAND_SEARCH_MASTER )
{
SendErrorReply ( tOut, "master-agent version mismatch; update me first, then update master!" );
return;
}
WORD uMasterVer { WORD (iMasterVer) };
bool bAgentMode = ( uMasterVer>0 );
// parse request
int iQueries = tReq.GetDword ();
if ( g_iMaxBatchQueries>0 && ( iQueries<=0 || iQueries>g_iMaxBatchQueries ) )
{
SendErrorReply ( tOut, "bad multi-query count %d (must be in 1..%d range)", iQueries, g_iMaxBatchQueries );
return;
}
SearchHandler_c tHandler ( iQueries, nullptr, QUERY_API, ( iMasterVer==0 ) );
for ( auto &dQuery : tHandler.m_dQueries )
if ( !ParseSearchQuery ( tReq, tOut, dQuery, uVer, uMasterVer ) )
return;
if ( !tHandler.m_dQueries.IsEmpty() )
{
QueryType_e eQueryType = tHandler.m_dQueries[0].m_eQueryType;
#ifndef NDEBUG
// we assume that all incoming queries have the same type
for ( const auto & i: tHandler.m_dQueries )
assert ( i.m_eQueryType==eQueryType );
#endif
std::unique_ptr<QueryParser_i> pParser;
if ( eQueryType==QUERY_JSON )
pParser = sphCreateJsonQueryParser();
else
pParser = sphCreatePlainQueryParser();
assert ( pParser );
tHandler.SetQueryParser ( std::move ( pParser ), eQueryType );
const CSphQuery & q = tHandler.m_dQueries[0];
myinfo::SetTaskInfo ( R"(api-search query="%s" comment="%s" table="%s")", q.m_sQuery.scstr (), q.m_sComment.scstr (), q.m_sIndexes.scstr () );
}
// run queries, send response
tHandler.RunQueries();
auto tReply = APIAnswer ( tOut, VER_COMMAND_SEARCH );
ARRAY_FOREACH ( i, tHandler.m_dQueries )
SendResult ( uVer, tOut, tHandler.m_dAggrResults[i], bAgentMode, tHandler.m_dQueries[i], uMasterVer );
int64_t iTotalPredictedTime = 0;
int64_t iTotalAgentPredictedTime = 0;
for ( const auto& dResult : tHandler.m_dAggrResults )
{
iTotalPredictedTime += dResult.m_iPredictedTime;
iTotalAgentPredictedTime += dResult.m_iAgentPredictedTime;
}
auto & g_tStats = gStats ();
g_tStats.m_iPredictedTime.fetch_add ( iTotalPredictedTime, std::memory_order_relaxed );
g_tStats.m_iAgentPredictedTime.fetch_add ( iTotalAgentPredictedTime, std::memory_order_relaxed );
ScWL_t dLastMetaLock ( g_tLastMetaLock );
g_tLastMeta = tHandler.m_dAggrResults.Last();
}
//////////////////////////////////////////////////////////////////////////
// TABLE FUNCTIONS
//////////////////////////////////////////////////////////////////////////
// table functions take an arbitrary result set as their input,
// and return a new, processed, (completely) different one as their output
//
// 1st argument should be the input result set, but a table function
// can optionally take and handle more arguments
//
// table function can completely (!) change the result set
// including (!) the schema
//
// for now, only builtin table functions are supported
// UDFs are planned when the internal call interface is stabilized
#define LOC_ERROR(_msg) { sError = _msg; return false; }
#define LOC_ERROR1(_msg,_arg1) { sError.SetSprintf ( _msg, _arg1 ); return false; }
class CSphTableFuncRemoveRepeats final : public ISphTableFunc
{
CSphString m_sCol;
int m_iOffset;
int m_iLimit;
public:
bool ValidateArgs ( const StrVec_t & dArgs, const CSphQuery &, CSphString & sError ) final
{
if ( dArgs.GetLength()!=3 )
LOC_ERROR ( "REMOVE_REPEATS() requires 4 arguments (result_set, column, offset, limit)" );
if ( !isdigit ( *dArgs[1].cstr() ) )
LOC_ERROR ( "REMOVE_REPEATS() argument 3 (offset) must be integer" );
if ( !isdigit ( *dArgs[2].cstr() ) )
LOC_ERROR ( "REMOVE_REPEATS() argument 4 (limit) must be integer" );
m_sCol = dArgs[0];
m_iOffset = atoi ( dArgs[1].cstr() );
m_iLimit = atoi ( dArgs[2].cstr() );
if ( !m_iLimit )
LOC_ERROR ( "REMOVE_REPEATS() argument 4 (limit) must be greater than 0" );
return true;
}
bool Process ( AggrResult_t * pResult, CSphString & sError ) final
{
assert ( pResult );
assert ( pResult->m_bOneSchema );
assert ( pResult->m_bSingle );
assert ( !pResult->m_dResults.IsEmpty () );
auto& dMatches = pResult->m_dResults.First().m_dMatches;
if ( dMatches.IsEmpty() )
return true;
// get subset expressing 'LIMIT N,M'
// LIMIT N,M clause must be applied before (!) table function
// so we scan source matches N to N+M-1
//
// within those matches, we filter out repeats in a given column,
// skip first m_iOffset eligible ones, and emit m_iLimit more
auto dSubMatches = dMatches.Slice ( pResult->m_iOffset, pResult->m_iCount );
if ( dSubMatches.IsEmpty() )
return true;
const CSphColumnInfo * pCol = pResult->m_tSchema.GetAttr ( m_sCol.cstr() );
if ( !pCol )
LOC_ERROR1 ( "REMOVE_REPEATS() argument 2 (column %s) not found in result set", m_sCol.cstr() );
ESphAttr t = pCol->m_eAttrType;
if ( t!=SPH_ATTR_INTEGER && t!=SPH_ATTR_BIGINT && t!=SPH_ATTR_TOKENCOUNT && t!=SPH_ATTR_STRINGPTR && t!=SPH_ATTR_STRING )
LOC_ERROR1 ( "REMOVE_REPEATS() argument 2 (column %s) must be of INTEGER, BIGINT, or STRINGPTR type", m_sCol.cstr() );
// we need to initialize the "last seen" value with a key that
// is guaranteed to be different from the 1st match that we will scan
// hence (val-1) for scalars, and NULL for strings
SphAttr_t iLastValue = ( t==SPH_ATTR_STRING || t==SPH_ATTR_STRINGPTR )
? 0
: ( dSubMatches.First().GetAttr ( pCol->m_tLocator ) - 1 );
int iOutPos = 0;
for ( auto& dMatch : dSubMatches )
{
// get value, skip repeats
SphAttr_t iCur = dMatch.GetAttr ( pCol->m_tLocator );
if ( iCur==iLastValue )
continue;
if ( iCur && iLastValue && t==SPH_ATTR_STRINGPTR )
{
auto a = sphUnpackPtrAttr ((const BYTE *) iCur );
auto b = sphUnpackPtrAttr ((const BYTE *) iLastValue );
if ( a.second==b.second && memcmp ( a.first, b.first, a.second )==0 )
continue;
}
iLastValue = iCur;
// skip eligible rows according to tablefunc offset
if ( m_iOffset>0 )
{
--m_iOffset;
continue;
}
// emit!
Swap ( dMatches[iOutPos], dMatch );
// break if we reached the tablefunc limit
if ( ++iOutPos==m_iLimit )
break;
}
// adjust the result set limits
pResult->ClampMatches ( iOutPos );
pResult->m_iOffset = 0;
pResult->m_iCount = dMatches.GetLength();
return true;
}
};
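// worked example: REMOVE_REPEATS(result, col, 1, 2) over column values
// [1,1,2,2,3] dedupes adjacent repeats to [1,2,3], skips the first eligible
// row (offset=1), and emits [2,3] (limit=2)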
std::unique_ptr<ISphTableFunc> CreateRemoveRepeats()
{
return std::make_unique<CSphTableFuncRemoveRepeats>();
}
#undef LOC_ERROR1
#undef LOC_ERROR
//////////////////////////////////////////////////////////////////////////
// SQL PARSER
//////////////////////////////////////////////////////////////////////////
// FIXME? verify or generate these automatically somehow?
static const char * g_dSqlStmts[] =
{
"parse_error", "dummy", "select", "insert", "replace", "delete", "show_warnings",
"show_status", "show_meta", "set", "begin", "commit", "rollback", "call",
"desc", "show_tables", "create_table", "create_table_like", "drop_table", "show_create_table", "update", "create_func",
"drop_func", "attach_index", "flush_rtindex", "flush_ramchunk", "show_variables", "truncate_rtindex",
"select_columns", "show_collation", "show_character_set", "optimize_index", "show_agent_status",
"show_index_status", "show_index_status", "show_profile", "alter_add", "alter_drop", "alter_modify", "show_plan",
"show_databases", "create_plugin", "drop_plugin", "show_plugins", "show_threads",
"facet", "alter_reconfigure", "show_index_settings", "flush_index", "reload_plugins", "reload_index",
"flush_hostnames", "flush_logs", "reload_indexes", "sysfilters", "debug", "alter_killlist_target",
"alter_index_settings", "join_cluster", "cluster_create", "cluster_delete", "cluster_index_add",
"cluster_index_delete", "cluster_update", "explain", "import_table", "freeze_indexes", "unfreeze_indexes",
"show_settings", "alter_rebuild_si", "kill", "show_locks"
};
STATIC_ASSERT ( sizeof(g_dSqlStmts)/sizeof(g_dSqlStmts[0])==STMT_TOTAL, STMT_DESC_SHOULD_BE_SAME_AS_STMT_TOTAL );
//////////////////////////////////////////////////////////////////////////
class CSphMatchVariant
{
public:
inline static SphAttr_t ToInt ( const SqlInsert_t & tVal )
{
switch ( tVal.m_iType )
{
case SqlInsert_t::QUOTED_STRING: return strtoul ( tVal.m_sVal.cstr(), NULL, 10 ); // FIXME? report conversion error?
case SqlInsert_t::CONST_INT: return int(tVal.GetValueInt());
case SqlInsert_t::CONST_FLOAT: return int(tVal.m_fVal); // FIXME? report conversion error
}
return 0;
}
inline static SphAttr_t ToBigInt ( const SqlInsert_t & tVal )
{
switch ( tVal.m_iType )
{
case SqlInsert_t::QUOTED_STRING: return strtoll ( tVal.m_sVal.cstr(), NULL, 10 ); // FIXME? report conversion error?
case SqlInsert_t::CONST_INT: return tVal.GetValueInt();
case SqlInsert_t::CONST_FLOAT: return int64_t(tVal.m_fVal); // FIXME? report conversion error?
}
return 0;
}
inline static SphAttr_t ToBigUint ( const SqlInsert_t & tVal )
{
switch ( tVal.m_iType )
{
case SqlInsert_t::QUOTED_STRING: return strtoull ( tVal.m_sVal.cstr(), NULL, 10 ); // FIXME? report conversion error?
case SqlInsert_t::CONST_INT: return tVal.GetValueUint();
case SqlInsert_t::CONST_FLOAT: return uint64_t(int64_t(tVal.m_fVal)); // FIXME? report conversion error?
}
return 0;
}
static bool ConvertBool ( const SqlInsert_t & tVal, SphAttr_t & tAttr )
{
if ( tVal.m_iType!=SqlInsert_t::QUOTED_STRING )
return false;
if ( tVal.m_sVal.EqN ( "true" ) )
{
tAttr = 1;
return true;
}
if ( tVal.m_sVal.EqN ( "false" ) )
{
tAttr = 0;
return true;
}
return false;
}
static bool ConvertPlainAttr ( const SqlInsert_t & tVal, ESphAttr eTargetType, const CSphString * pName, SphAttr_t & tAttr, bool bDocID, CSphString & sError )
{
tAttr = 0;
switch ( eTargetType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TOKENCOUNT:
tAttr = ToInt(tVal);
break;
case SPH_ATTR_BOOL:
if ( !ConvertBool ( tVal, tAttr ) ) // try 'true'/'false' strings first, then fall back to a number
tAttr = ToInt ( tVal );
break;
case SPH_ATTR_BIGINT:
if ( bDocID )
{
if ( tVal.IsNegativeInt() )
{
sError = "Negative document ids are not allowed";
return false;
}
tAttr = ToBigUint(tVal);
}
else
tAttr = ToBigInt(tVal);
break;
case SPH_ATTR_FLOAT:
if ( tVal.m_iType==SqlInsert_t::QUOTED_STRING )
tAttr = sphF2DW ( (float)strtod ( tVal.m_sVal.cstr(), NULL ) ); // FIXME? report conversion error?
else if ( tVal.m_iType==SqlInsert_t::CONST_INT )
tAttr = sphF2DW ( float(tVal.GetValueInt()) ); // FIXME? report conversion error?
else if ( tVal.m_iType==SqlInsert_t::CONST_FLOAT )
tAttr = sphF2DW ( tVal.m_fVal );
break;
case SPH_ATTR_STRINGPTR:
break;
case SPH_ATTR_TIMESTAMP:
{
if ( pName && tVal.m_iType==SqlInsert_t::QUOTED_STRING )
tAttr = GetUTC ( tVal.m_sVal );
else
tAttr = ToInt(tVal);
}
break;
default:
return false;
};
return true;
}
inline static bool SetAttr ( CSphMatch & tMatch, const CSphAttrLocator & tLoc, const CSphString * pName, const SqlInsert_t & tVal, ESphAttr eTargetType, bool bDocID, CSphString & sError )
{
SphAttr_t tAttr;
if ( ConvertPlainAttr ( tVal, eTargetType, pName, tAttr, bDocID, sError ) )
{
tMatch.SetAttr ( tLoc, tAttr );
return true;
}
return false;
}
inline static void SetDefaultAttr ( CSphMatch & tMatch, const CSphAttrLocator & tLoc, ESphAttr eTargetType )
{
SqlInsert_t tVal;
tVal.m_iType = SqlInsert_t::CONST_INT;
tVal.SetValueInt(0);
CSphString sError;
SetAttr ( tMatch, tLoc, nullptr, tVal, eTargetType, false, sError );
}
};
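// usage sketch (illustrative): converting a quoted INSERT value into a FLOAT
// attribute goes through sphF2DW(), which stores the IEEE-754 bit pattern as
// a DWORD; e.g. SetAttr() with a QUOTED_STRING value of "1.5" and
// eTargetType==SPH_ATTR_FLOAT writes sphF2DW ( 1.5f ) into the match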
/////////////////////////////////////////////////////////////////////////////
// EXCERPTS HANDLER
/////////////////////////////////////////////////////////////////////////////
enum eExcerpt_Flags
{
EXCERPT_FLAG_REMOVESPACES = 1, // deprecated
EXCERPT_FLAG_EXACTPHRASE = 2, // deprecated
EXCERPT_FLAG_SINGLEPASSAGE = 4,
EXCERPT_FLAG_USEBOUNDARIES = 8,
EXCERPT_FLAG_WEIGHTORDER = 16,
EXCERPT_FLAG_QUERY = 32, // deprecated
EXCERPT_FLAG_FORCE_ALL_WORDS = 64,
EXCERPT_FLAG_LOAD_FILES = 128,
EXCERPT_FLAG_ALLOW_EMPTY = 256,
EXCERPT_FLAG_EMIT_ZONES = 512,
EXCERPT_FLAG_FILES_SCATTERED = 1024,
EXCERPT_FLAG_FORCEPASSAGES = 2048
};
int PackAPISnippetFlags ( const SnippetQuerySettings_t &q, bool bOnlyScattered = false )
{
int iRawFlags = q.m_iLimitPassages ? EXCERPT_FLAG_SINGLEPASSAGE : 0;
iRawFlags |= q.m_bUseBoundaries ? EXCERPT_FLAG_USEBOUNDARIES : 0;
iRawFlags |= q.m_bWeightOrder ? EXCERPT_FLAG_WEIGHTORDER : 0;
iRawFlags |= q.m_bForceAllWords ? EXCERPT_FLAG_FORCE_ALL_WORDS : 0;
if ( !bOnlyScattered || !( q.m_uFilesMode & 2 ) )
iRawFlags |= ( q.m_uFilesMode & 1 ) ? EXCERPT_FLAG_LOAD_FILES : 0;
iRawFlags |= q.m_bAllowEmpty ? EXCERPT_FLAG_ALLOW_EMPTY : 0;
iRawFlags |= q.m_bEmitZones ? EXCERPT_FLAG_EMIT_ZONES : 0;
iRawFlags |= ( q.m_uFilesMode & 2 ) ? EXCERPT_FLAG_FILES_SCATTERED : 0;
iRawFlags |= q.m_bForcePassages ? EXCERPT_FLAG_FORCEPASSAGES : 0;
return iRawFlags;
}
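// e.g. a query with load_files (m_uFilesMode & 1) and allow_empty set packs to
// EXCERPT_FLAG_LOAD_FILES|EXCERPT_FLAG_ALLOW_EMPTY = 128|256 = 384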
struct ExcerptQuery_t
{
int64_t m_iSize = 0; ///< file size, to sort to work-queue order
CSphString m_sSource; ///< source data
CSphString m_sError;
CSphVector<BYTE> m_dResult; ///< query result
};
class SnippetRemote_c : public RequestBuilder_i, public ReplyParser_i
{
public:
SnippetRemote_c ( VecTraits_T<ExcerptQuery_t> & dQueries, const SnippetQuerySettings_t& q )
: m_dQueries ( dQueries )
, m_tSettings ( q )
{}
void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final;
bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const final;
private:
VecTraits_T<ExcerptQuery_t> & m_dQueries;
const SnippetQuerySettings_t & m_tSettings;
mutable std::atomic<int> m_iWorker {0};
bool ParseReplyScattered ( MemInputBuffer_c & tReq, const VecTraits_T<int>& dDocs ) const;
bool ParseReplyNonScattered ( MemInputBuffer_c & tReq, const VecTraits_T<int> & dDocs ) const;
public:
CSphVector<const VecTraits_T<int> *> m_dTasks;
};
void SnippetRemote_c::BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const
{
// sends either all queries to each agent, or a sequence of queries to the current agent
auto iWorker = tAgent.m_iStoreTag;
if ( iWorker<0 )
{
iWorker = m_iWorker.fetch_add ( 1, std::memory_order_relaxed );
tAgent.m_iStoreTag = iWorker;
}
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT );
tOut.SendInt ( 0 );
tOut.SendInt ( PackAPISnippetFlags ( m_tSettings, true ) );
tOut.SendString ( tAgent.m_tDesc.m_sIndexes.cstr () );
tOut.SendString ( m_tSettings.m_sQuery.cstr() );
tOut.SendString ( m_tSettings.m_sBeforeMatch.cstr() );
tOut.SendString ( m_tSettings.m_sAfterMatch.cstr() );
tOut.SendString ( m_tSettings.m_sChunkSeparator.cstr() );
tOut.SendInt ( m_tSettings.m_iLimit );
tOut.SendInt ( m_tSettings.m_iAround );
tOut.SendInt ( m_tSettings.m_iLimitPassages );
tOut.SendInt ( m_tSettings.m_iLimitWords );
tOut.SendInt ( m_tSettings.m_iPassageId );
tOut.SendString ( m_tSettings.m_sStripMode.cstr() );
tOut.SendString ( PassageBoundarySz ( m_tSettings.m_ePassageSPZ ) );
const auto& dDocs = *m_dTasks[iWorker];
tOut.SendInt ( dDocs.GetLength() );
for ( int iDoc : dDocs )
tOut.SendString ( m_dQueries[iDoc].m_sSource.cstr() );
}
bool SnippetRemote_c::ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & tAgent ) const
{
auto& tDocs = *m_dTasks[tAgent.m_iStoreTag];
if ( m_tSettings.m_uFilesMode & 2 ) // scattered files
return ParseReplyScattered ( tReq, tDocs );
return ParseReplyNonScattered ( tReq, tDocs );
}
bool SnippetRemote_c::ParseReplyScattered ( MemInputBuffer_c & tReq, const VecTraits_T<int> & dDocs ) const
{
bool bOk = true;
for ( int iDoc : dDocs )
{
ExcerptQuery_t & tQuery = m_dQueries[iDoc];
CSphVector<BYTE> & dRes = tQuery.m_dResult;
if ( !tReq.GetString(dRes) || dRes.IsEmpty() )
{
bOk = false;
dRes.Resize(0);
} else
tQuery.m_sError = "";
}
return bOk;
}
bool SnippetRemote_c::ParseReplyNonScattered ( MemInputBuffer_c & tReq, const VecTraits_T<int> & dDocs ) const
{
for ( int iDoc : dDocs )
{
ExcerptQuery_t & tQuery = m_dQueries[iDoc];
tReq.GetString ( tQuery.m_dResult );
tQuery.m_iSize = -1; // means 'processed'
}
return true;
}
static int64_t GetSnippetDataSize ( const CSphVector<ExcerptQuery_t> &dSnippets )
{
int64_t iSize = 0;
for ( const auto & dSnippet: dSnippets )
{
if ( dSnippet.m_iSize>0 )
iSize += dSnippet.m_iSize;
else if ( !dSnippet.m_iSize )
iSize += dSnippet.m_sSource.Length ();
}
iSize /= 100;
return iSize;
}
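// note: the value above is in units of 100 bytes; callers print it with the
// "%.1Dk" format, which presumably renders it as kilobytes with one decimal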
static VecRefPtrsAgentConn_t GetDistrAgents ( const cDistributedIndexRefPtr_t& pDist )
{
assert ( pDist );
VecRefPtrsAgentConn_t tRemotes;
for ( const auto& pAgent : pDist->m_dAgents )
{
auto * pConn = new AgentConn_t;
pConn->SetMultiAgent ( pAgent );
pConn->m_iMyConnectTimeoutMs = pDist->GetAgentConnectTimeoutMs();
pConn->m_iMyQueryTimeoutMs = pDist->GetAgentQueryTimeoutMs();
tRemotes.Add ( pConn );
}
return tRemotes;
}
// collect source sizes; for absent files, set the size to -1
static bool CollectSourceSizes ( CSphVector<ExcerptQuery_t> & dQueries, bool bFileMode, bool bNeedAll, CSphString & sError )
{
// collect source sizes
if ( !bFileMode )
{
dQueries.Apply ( [] ( ExcerptQuery_t & dQuery ) { dQuery.m_iSize = dQuery.m_sSource.Length (); } );
return true;
}
for ( auto & dQuery : dQueries )
{
CSphString sFilename, sStatError;
sFilename.SetSprintf ( "%s%s", g_sSnippetsFilePrefix.cstr (), dQuery.m_sSource.scstr () );
if ( !TestEscaping ( g_sSnippetsFilePrefix, sFilename ) )
{
sError.SetSprintf ( "File '%s' escapes '%s' scope", sFilename.scstr (), g_sSnippetsFilePrefix.scstr () );
return false;
}
auto iFileSize = sphGetFileSize ( sFilename, &sStatError );
if ( iFileSize<0 )
{
if ( bNeedAll )
{
sError = sStatError;
return false;
}
dQuery.m_iSize = -1;
} else
dQuery.m_iSize = iFileSize;
}
return true;
}
// helper, called both for single and for multi snippets
static inline bool MakeSingleLocalSnippetWithFields ( ExcerptQuery_t & tQuery, const SnippetQuerySettings_t & q,
SnippetBuilder_c * pBuilder, const VecTraits_T<int>& dFields )
{
assert ( pBuilder );
std::unique_ptr<TextSource_i> pSource = CreateSnippetSource ( q.m_uFilesMode, (const BYTE*)tQuery.m_sSource.cstr(), tQuery.m_sSource.Length() );
SnippetResult_t tRes;
if ( !pBuilder->Build ( pSource, tRes ) )
{
tQuery.m_sError = std::move ( tRes.m_sError );
return false;
}
tQuery.m_dResult = pBuilder->PackResult ( tRes, dFields );
return true;
};
// boring single snippet
static inline bool MakeSingleLocalSnippet ( ExcerptQuery_t & tQuery, const SnippetQuerySettings_t & q,
SnippetBuilder_c * pBuilder, CSphString& sError )
{
CSphVector<int> dStubFields;
dStubFields.Add ( 0 );
if ( MakeSingleLocalSnippetWithFields ( tQuery, q, pBuilder, dStubFields ) )
return true;
sError = tQuery.m_sError;
return false;
}
struct SnippedBuilderCtxRef_t
{
SnippetBuilder_c * m_pBuilder;
SnippedBuilderCtxRef_t ( SnippetBuilder_c * pBuilder ) : m_pBuilder ( pBuilder ) {}
inline static bool IsClonable () { return true; }
};
struct SnippedBuilderCtxClone_t : public SnippedBuilderCtxRef_t, ISphNoncopyable
{
explicit SnippedBuilderCtxClone_t ( const SnippedBuilderCtxRef_t& dParent )
: SnippedBuilderCtxRef_t { dParent.m_pBuilder->MakeClone() }
{}
// dtor is only for clones!
~SnippedBuilderCtxClone_t() { SafeDelete (m_pBuilder); }
};
// Starts or performs parallel snippets creation with throttling
static void MakeSnippetsCoro ( const VecTraits_T<int>& dTasks, CSphVector<ExcerptQuery_t> & dQueries,
const SnippetQuerySettings_t& q, SnippetBuilder_c * pBuilder)
{
assert ( pBuilder );
auto iJobs = dTasks.GetLength();
if ( !iJobs )
return;
sphLogDebug ( "MakeSnippetsCoro invoked for %d tasks", iJobs );
CSphVector<int> dStubFields;
dStubFields.Add ( 0 );
// the context
ClonableCtx_T<SnippedBuilderCtxRef_t, SnippedBuilderCtxClone_t, Threads::ECONTEXT::UNORDERED> dCtx { pBuilder };
auto pDispatcher = Dispatcher::Make ( iJobs, 0, GetEffectiveBaseDispatcherTemplate(), dCtx.IsSingle() );
dCtx.LimitConcurrency ( pDispatcher->GetConcurrency() );
Coro::ExecuteN ( dCtx.Concurrency ( iJobs ), [&]
{
sphLogDebug ( "MakeSnippetsCoro Coro started" );
auto pSource = pDispatcher->MakeSource();
int iJob = -1; // mark it consumed
if ( !pSource->FetchTask ( iJob ) )
{
sphLogDebug ( "Early finish parallel MakeSnippetsCoro because of empty queue" );
return; // already nothing to do, early finish.
}
auto tJobContext = dCtx.CloneNewContext();
auto& tCtx = tJobContext.first;
sphLogDebug ( "MakeSnippetsCoro cloned context %d", tJobContext.second );
Threads::Coro::SetThrottlingPeriodMS ( session::GetThrottlingPeriodMS() );
while (true)
{
myinfo::SetTaskInfo ( "s %d:", iJob );
sphLogDebugv ( "MakeSnippetsCoro %d %d[%d]", tJobContext.second, iJob, dTasks[iJob] );
MakeSingleLocalSnippetWithFields ( dQueries[dTasks[iJob]], q, tCtx.m_pBuilder, dStubFields );
sphLogDebug ( "MakeSnippetsCoro Coro loop tick %d finished", iJob );
iJob = -1; // mark it consumed
if ( !pSource->FetchTask ( iJob ) )
return; // already nothing to do, early finish.
// yield and reschedule every quantum of time; this gives other tasks a chance to run
Threads::Coro::ThrottleAndKeepCrashQuery ();
}
});
}
// divide the set of tasks from dTasks into chunks with the most balanced aggregate iSize in each
static CSphVector<CSphVector<int>> DivideTasks ( const VecTraits_T<int> & dTasks,
const VecTraits_T<ExcerptQuery_t> & dQueries, int iWorkers )
{
CSphVector<CSphVector<int>> dResults;
auto iTasks = dTasks.GetLength();
auto iLimit = Min ( iWorkers, iTasks );
if ( iWorkers>=iTasks )
{
dResults.Resize ( iLimit );
for ( int i=0; i<iLimit; ++i )
dResults[i].Add ( dTasks[i] );
} else
{
// helpers
using ItemsQueue_c = TimeoutQueue_c;
using EnqueuedItem_t = EnqueuedTimeout_t;
ItemsQueue_c qTasks;
struct PriorityVec_t : EnqueuedItem_t { int m_iRefIdx; };
CSphVector<PriorityVec_t> dPriorityResults ( iWorkers );
dResults.Resize ( iWorkers );
// initially fill the queue
ARRAY_FOREACH ( i, dPriorityResults )
{
dResults[i].Add ( dTasks[i] );
dPriorityResults[i].m_iTimeoutTimeUS = dQueries[dTasks[i]].m_iSize;
dPriorityResults[i].m_iRefIdx = i;
qTasks.Change ( &dPriorityResults[i] );
}
// update the queue
for ( int i=iWorkers; i<iTasks; ++i )
{
auto * pBest = (PriorityVec_t *) qTasks.Root ();
dResults[pBest->m_iRefIdx].Add ( dTasks[i] );
pBest->m_iTimeoutTimeUS += dQueries[dTasks[i]].m_iSize;
qTasks.Change ( pBest );
}
}
return dResults;
}
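// worked example (assuming TimeoutQueue_c roots the smallest aggregate):
// with 3 workers and task sizes [10,9,8,5,4], the workers are seeded with
// 10, 9 and 8; the size-5 task then lands on the worker holding 8 (now 13),
// and the size-4 task on the one holding 9 (now 13), yielding 10/13/13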
// remote scattered snippets (with local pass)
// * the dLocal subset is run on the local host
// * the dAbsent subset is sent to each remote agent
static void MakeRemoteScatteredSnippets ( CSphVector<ExcerptQuery_t> & dQueries,
cDistributedIndexRefPtr_t pDist,
SnippetBuilder_c * pBuilder,
const SnippetQuerySettings_t & q,
const VecTraits_T<int>& dLocal,
const VecTraits_T<int>& dAbsent )
{
assert ( pDist );
assert ( pBuilder );
// and finally the most interesting case: remote, possibly scattered.
auto dAgents = GetDistrAgents ( pDist );
int iRemoteAgents = dAgents.GetLength();
SnippetRemote_c tRemotes ( dQueries, q );
tRemotes.m_dTasks.Resize ( iRemoteAgents );
// in the scattered case, just push the chain of locally absent files to all remotes
for ( auto & pTask : tRemotes.m_dTasks )
pTask = &dAbsent;
// query remote building
CSphRefcountedPtr<RemoteAgentsObserver_i> tReporter ( GetObserver () );
ScheduleDistrJobs ( dAgents, &tRemotes, &tRemotes, tReporter );
// start local building and wait it to finish
MakeSnippetsCoro ( dLocal, dQueries, q, pBuilder );
// wait remotes to finish also
tReporter->Finish ();
auto iSuccesses = ( int ) tReporter->GetSucceeded ();
auto iAgentsDone = ( int ) tReporter->GetFinished ();
if ( iSuccesses!=iRemoteAgents )
sphWarning ( "Remote snippets: some of the agents didn't answered: %d queried, %d finished, %d succeeded",
iRemoteAgents, iAgentsDone, iSuccesses );
}
// remote non-scattered snippets (with local pass)
// non-scattered mode assumes each host has the full set of sources, so we don't need to check for absent files here.
// * divide the set of sources among the remotes and the local host, balancing size.
// * assume dPresent holds indexes of monotonically decreasing sizes; that is needed for balancing.
static void MakeRemoteNonScatteredSnippets ( CSphVector<ExcerptQuery_t> & dQueries,
cDistributedIndexRefPtr_t pDist,
SnippetBuilder_c * pBuilder,
const SnippetQuerySettings_t & q,
const VecTraits_T<int>& dPresent )
{
assert ( pDist );
assert ( pBuilder );
auto dAgents = GetDistrAgents ( pDist );
int iRemoteAgents = dAgents.GetLength();
SnippetRemote_c tRemotes ( dQueries, q );
tRemotes.m_dTasks.Resize ( iRemoteAgents );
// in the non-scattered case, distribute the set of sources among the workers, counting 1 local worker as well.
auto dJobSet = DivideTasks ( dPresent, dQueries, iRemoteAgents+1 ) ; // +1 since we also will work locally.
auto& dLocalSet = dJobSet[iRemoteAgents];
for ( int i = 0; i<iRemoteAgents; ++i )
tRemotes.m_dTasks[i] = &dJobSet[i];
// query remote building
CSphRefcountedPtr<RemoteAgentsObserver_i> tReporter ( GetObserver () );
ScheduleDistrJobs ( dAgents, &tRemotes, &tRemotes, tReporter );
// start local building and wait it to finish
MakeSnippetsCoro ( dLocalSet, dQueries, q, pBuilder );
// wait remotes to finish also
tReporter->Finish ();
auto iSuccesses = ( int ) tReporter->GetSucceeded ();
auto iAgentsDone = ( int ) tReporter->GetFinished ();
if ( iSuccesses==iRemoteAgents )
return;
sphWarning ( "Remote snippets: some of the agents didn't answered: %d queried, %d finished, %d succeeded",
iRemoteAgents, iAgentsDone, iSuccesses );
// let's collect failures and make one more pass over them
CSphVector<int> dFailed;
// collect failed nodes
dPresent.Apply ( [&] ( int iDoc ) {
if ( dQueries[iDoc].m_iSize<0 )
dFailed.Add(iDoc);
});
if ( dFailed.IsEmpty() )
return;
// failsafe - one more turn for failed queries on local agent
sphWarning ( "Snippets: failsafe for %d failed items", (int) dFailed.GetLength() );
MakeSnippetsCoro ( dFailed, dQueries, q, pBuilder );
}
bool MakeSnippets ( CSphString sIndex, CSphVector<ExcerptQuery_t> & dQueries,
const SnippetQuerySettings_t& q, CSphString & sError )
{
assert ( !dQueries.IsEmpty() );
// When both load_files & load_files_scattered are set, absent files are reported as errors.
// load_files_scattered without load_files just omits the absent files (returns empty strings).
auto bScattered = !!( q.m_uFilesMode & 2 );
auto bNeedAllFiles = !!( q.m_uFilesMode & 1 );
auto pDist = GetDistr ( sIndex );
bool bRemote = pDist && !pDist->m_dAgents.IsEmpty ();
if ( bRemote )
{
if ( pDist->m_dLocal.GetLength()!=1 )
{
sError.SetSprintf ( "%s", "distributed table for snippets must have exactly one local agent" );
return false;
}
if ( !q.m_uFilesMode )
{
sError.SetSprintf ( "%s", "distributed table for snippets available only when using external files" );
return false;
}
// for remotes, the index is the 1st local agent of the distributed table, so move on!
sIndex = pDist->m_dLocal[0];
}
auto pServed = GetServed ( sIndex );
if ( !pServed )
{
sError.SetSprintf ( "unknown local table '%s' in search request", sIndex.cstr() );
return false;
}
RIdx_c pLocalIndex { pServed };
assert ( pLocalIndex );
///////////////////
/// do highlighting
///////////////////
auto pBuilder = std::make_unique<SnippetBuilder_c>();
pBuilder->Setup ( pLocalIndex, q );
if ( !pBuilder->SetQuery ( q.m_sQuery.cstr(), true, sError ) )
return false;
// boring single snippet
if ( dQueries.GetLength ()==1 )
return MakeSingleLocalSnippet ( dQueries[0], q, pBuilder.get(), sError );
if ( !CollectSourceSizes ( dQueries, q.m_uFilesMode, !bScattered, sError ) )
return false;
// set correct data size for snippets
myinfo::SetTaskInfo ( R"(snippet datasize=%.1Dk query="%s")", GetSnippetDataSize ( dQueries ), q.m_sQuery.scstr () );
// collect list of existing and empty sources
CSphVector<int> dPresent;
CSphVector<int> dAbsent;
ARRAY_FOREACH ( i, dQueries )
{
if ( dQueries[i].m_iSize<0 )
dAbsent.Add(i);
else
dPresent.Add(i);
}
// check if all files are available locally - then we need no remote pass.
if ( bScattered && dAbsent.IsEmpty() )
bRemote = false;
if ( bNeedAllFiles && !dAbsent.IsEmpty() )
for ( int i : dAbsent )
dQueries[i].m_sError.SetSprintf ( "absenthead: failed to stat %s", dQueries[i].m_sSource.cstr () );
// tough jobs first (sort by descending size)
if ( !bScattered )
dPresent.Sort ( Lesser ( [&dQueries] ( int a, int b ) { return dQueries[a].m_iSize>dQueries[b].m_iSize; } ) );
if ( !bRemote )
{
// multithreaded, but no remote agents.
MakeSnippetsCoro ( dPresent, dQueries, q, pBuilder.get() );
} else
{
assert ( pDist );
// multithreaded with remotes (scattered and full)
if ( bScattered )
MakeRemoteScatteredSnippets ( dQueries, pDist, pBuilder.get(), q, dPresent, dAbsent );
else
MakeRemoteNonScatteredSnippets ( dQueries, pDist, pBuilder.get (), q, dPresent );
}
StringBuilder_c sErrors ( "; " );
dQueries.Apply ( [&] ( const ExcerptQuery_t & tQuery ) { sErrors << tQuery.m_sError; } );
sErrors.MoveTo ( sError );
return sError.IsEmpty();
}
// throw out a trailing \0 if any
inline static void FixupResultTail (CSphVector<BYTE> & dData)
{
if ( !dData.IsEmpty() && !dData.Last () )
dData.Pop ();
}
void HandleCommandExcerpt ( ISphOutputBuffer & tOut, int iVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( iVer, VER_COMMAND_EXCERPT, tOut ) )
return;
/////////////////////////////
// parse and process request
/////////////////////////////
const int EXCERPT_MAX_ENTRIES = 1024;
// v.1.1
SnippetQuerySettings_t q;
tReq.GetInt (); // mode field is for now reserved and ignored
int iFlags = tReq.GetInt ();
CSphString sIndex = tReq.GetString ();
q.m_sQuery = tReq.GetString ();
q.m_sBeforeMatch = tReq.GetString ();
q.m_sAfterMatch = tReq.GetString ();
q.m_sChunkSeparator = tReq.GetString ();
q.m_iLimit = tReq.GetInt ();
q.m_iAround = tReq.GetInt ();
if ( iVer>=0x102 )
{
q.m_iLimitPassages = tReq.GetInt();
q.m_iLimitWords = tReq.GetInt();
q.m_iPassageId = tReq.GetInt();
q.m_sStripMode = tReq.GetString();
if ( q.m_sStripMode!="none" && q.m_sStripMode!="index" && q.m_sStripMode!="strip" && q.m_sStripMode!="retain" )
{
SendErrorReply ( tOut, "unknown html_strip_mode=%s", q.m_sStripMode.cstr() );
return;
}
}
q.Setup();
CSphString sPassageBoundaryMode;
if ( iVer>=0x103 )
q.m_ePassageSPZ = GetPassageBoundary ( tReq.GetString() );
q.m_bUseBoundaries = ( iFlags & EXCERPT_FLAG_USEBOUNDARIES )!=0;
q.m_bWeightOrder = ( iFlags & EXCERPT_FLAG_WEIGHTORDER )!=0;
q.m_bForceAllWords = ( iFlags & EXCERPT_FLAG_FORCE_ALL_WORDS )!=0;
if ( iFlags & EXCERPT_FLAG_SINGLEPASSAGE )
q.m_iLimitPassages = 1;
q.m_uFilesMode = ( iFlags & EXCERPT_FLAG_LOAD_FILES )?1:0;
bool bScattered = ( iFlags & EXCERPT_FLAG_FILES_SCATTERED )!=0;
q.m_uFilesMode |= bScattered?2:0;
q.m_bAllowEmpty = ( iFlags & EXCERPT_FLAG_ALLOW_EMPTY )!=0;
q.m_bEmitZones = ( iFlags & EXCERPT_FLAG_EMIT_ZONES )!=0;
q.m_bForcePassages = ( iFlags & EXCERPT_FLAG_FORCEPASSAGES )!=0;
bool bExactPhrase = ( iFlags & EXCERPT_FLAG_EXACTPHRASE )!=0;
if ( bExactPhrase )
{
SendErrorReply ( tOut, "exact_phrase is deprecated" );
return;
}
int iCount = tReq.GetInt ();
if ( iCount<=0 || iCount>EXCERPT_MAX_ENTRIES )
{
SendErrorReply ( tOut, "invalid entries count %d", iCount );
return;
}
CSphString sError;
if ( !sphCheckOptionsSPZ ( q, q.m_ePassageSPZ, sError ) )
{
SendErrorReply ( tOut, "%s", sError.cstr() );
return;
}
CSphVector<ExcerptQuery_t> dQueries { iCount };
for ( auto & dQuery : dQueries )
{
dQuery.m_sSource = tReq.GetString (); // fetch data
if ( tReq.GetError() )
{
SendErrorReply ( tOut, "invalid or truncated request" );
return;
}
}
myinfo::SetTaskInfo ( R"(api-snippet datasize=%.1Dk query="%s")", GetSnippetDataSize ( dQueries ), q.m_sQuery.scstr ());
if ( !MakeSnippets ( sIndex, dQueries, q, sError ) )
{
SendErrorReply ( tOut, "%s", sError.cstr() );
return;
}
////////////////
// serve result
////////////////
for ( const auto & i : dQueries )
{
// handle errors
if ( !bScattered && !i.m_sError.IsEmpty() )
{
SendErrorReply ( tOut, "highlighting failed: %s", i.m_sError.cstr() );
return;
}
}
auto tReply = APIAnswer ( tOut, VER_COMMAND_EXCERPT );
for ( const auto & i : dQueries )
tOut.SendArray ( i.m_dResult );
}
/////////////////////////////////////////////////////////////////////////////
// KEYWORDS HANDLER
/////////////////////////////////////////////////////////////////////////////
static bool DoGetKeywords ( const CSphString & sIndex, const CSphString & sQuery, const GetKeywordsSettings_t & tSettings, CSphVector <CSphKeywordInfo> & dKeywords, CSphString & sError, SearchFailuresLog_c & tFailureLog );
static void HandleCommandKeywords ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_KEYWORDS, tOut ) )
return;
GetKeywordsSettings_t tSettings;
CSphString sQuery = tReq.GetString ();
CSphString sIndex = tReq.GetString ();
tSettings.m_bStats = !!tReq.GetInt ();
if ( uVer>=0x101 )
{
tSettings.m_bFoldLemmas = !!tReq.GetInt ();
tSettings.m_bFoldBlended = !!tReq.GetInt ();
tSettings.m_bFoldWildcards = !!tReq.GetInt ();
tSettings.m_iExpansionLimit = tReq.GetInt ();
}
if ( uVer>=0x102 )
tSettings.m_eJiebaMode = (JiebaMode_e)tReq.GetInt();
CSphString sError;
SearchFailuresLog_c tFailureLog;
CSphVector < CSphKeywordInfo > dKeywords;
bool bOk = DoGetKeywords ( sIndex, sQuery, tSettings, dKeywords, sError, tFailureLog );
if ( !bOk )
{
SendErrorReply ( tOut, "%s", sError.cstr() );
return;
}
// just log distributed index errors, as the command has no warning field to pass such errors in
if ( !tFailureLog.IsEmpty() )
{
StringBuilder_c sErrorBuf;
tFailureLog.BuildReport ( sErrorBuf );
sphWarning ( "%s", sErrorBuf.cstr() );
}
auto tReply = APIAnswer ( tOut, VER_COMMAND_KEYWORDS );
tOut.SendInt ( dKeywords.GetLength () );
for ( auto & dKeyword : dKeywords )
{
tOut.SendString ( dKeyword.m_sTokenized.cstr () );
tOut.SendString ( dKeyword.m_sNormalized.cstr () );
if ( uVer>=0x101 )
tOut.SendInt ( dKeyword.m_iQpos );
if ( tSettings.m_bStats )
{
tOut.SendInt ( dKeyword.m_iDocs );
tOut.SendInt ( dKeyword.m_iHits );
}
}
}
/////////////////////////////////////////////////////////////////////////////
// UPDATES HANDLER
/////////////////////////////////////////////////////////////////////////////
class UpdateRequestBuilder_c : public RequestBuilder_i
{
public:
explicit UpdateRequestBuilder_c ( AttrUpdateSharedPtr_t pUpd ) : m_pUpd ( pUpd ) {}
void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer& tOut ) const final;
protected:
AttrUpdateSharedPtr_t m_pUpd;
};
class UpdateReplyParser_c : public ReplyParser_i
{
public:
explicit UpdateReplyParser_c ( int * pUpd )
: m_pUpdated ( pUpd )
{}
bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const final
{
*m_pUpdated += tReq.GetDword ();
return true;
}
protected:
int * m_pUpdated;
};
void UpdateRequestBuilder_c::BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const
{
const char * sIndexes = tAgent.m_tDesc.m_sIndexes.cstr();
assert ( m_pUpd->m_dAttributes.all_of ( [&] ( const TypedAttribute_t & tAttr ) { return ( tAttr.m_eType!=SPH_ATTR_INT64SET ); } ) );
auto& tUpd = *m_pUpd;
// API header
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE );
tOut.SendString ( sIndexes );
tOut.SendInt ( tUpd.m_dAttributes.GetLength() );
tOut.SendInt ( tUpd.m_bIgnoreNonexistent ? 1 : 0 );
for ( const auto & i : tUpd.m_dAttributes )
{
tOut.SendString ( i.m_sName.cstr() );
UpdateType_e eUpdate;
switch ( i.m_eType )
{
case SPH_ATTR_UINT32SET: eUpdate = UPDATE_MVA32; break;
case SPH_ATTR_STRING:
case SPH_ATTR_JSON: eUpdate = UPDATE_STRING; break;
default: eUpdate = UPDATE_INT; break;
};
tOut.SendInt ( eUpdate );
}
tOut.SendInt ( tUpd.m_dDocids.GetLength() );
ARRAY_FOREACH ( iDoc, tUpd.m_dDocids )
{
tOut.SendUint64 ( tUpd.m_dDocids[iDoc] );
const DWORD* pPool = tUpd.m_dPool.Begin() + tUpd.GetRowOffset ( iDoc );
for ( const auto & i : tUpd.m_dAttributes )
{
DWORD uVal = *pPool++;
switch ( i.m_eType )
{
case SPH_ATTR_UINT32SET:
{
// size down in case of MVA
// MVA is stored as mva64 in the pool, but the API can handle only mva32 (see HandleCommandUpdate)
// only SphinxQL can work with both mva32 and mva64, and only SphinxQL can receive mva64 updates
// a SphinxQL master communicates with agents via SphinxqlRequestBuilder_c
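// e.g. uVal==4 pool DWORDs hold two mva64 entries {7,9}: we send count=2,
// followed by the low halves 7 and 9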
const DWORD * pEnd = pPool + uVal;
tOut.SendDword ( uVal/2 );
while ( pPool<pEnd )
{
auto iVal = *(int64_t*)pPool;
tOut.SendDword ( iVal&0xFFFFFFFF );
pPool += 2;
}
}
break;
case SPH_ATTR_STRING:
case SPH_ATTR_JSON:
{
DWORD uBlobLen = *pPool++;
tOut.SendDword ( uBlobLen );
tOut.SendBytes ( tUpd.m_dBlobs.Begin()+uVal, uBlobLen );
}
break;
default:
tOut.SendDword ( uVal );
break;
}
}
}
}
static void DoCommandUpdate ( const CSphString & sIndex, const CSphString& sCluster, const char * sDistributed, AttrUpdateSharedPtr_t pUpd,
bool bBlobUpdate, int & iSuccesses, int & iUpdated, SearchFailuresLog_c & dFails )
{
TRACE_CORO ( "rt", "DoCommandUpdate" );
int iUpd = 0;
CSphString sWarning;
RtAccum_t tAcc;
ReplicationCommand_t* pCmd = tAcc.AddCommand ( ReplCmd_e::UPDATE_API, sIndex, sCluster );
assert ( pCmd );
pCmd->m_pUpdateAPI = std::move(pUpd);
pCmd->m_bBlobUpdate = bBlobUpdate;
HandleCmdReplicateUpdate ( tAcc, sWarning, iUpd );
if ( iUpd<0 )
{
dFails.Submit ( sIndex, sDistributed, TlsMsg::szError() );
} else
{
iUpdated += iUpd;
++iSuccesses;
if ( sWarning.Length() )
dFails.Submit ( sIndex, sDistributed, sWarning.cstr() );
}
}
using DistrPtrs_t = VecRefPtrs_t< const DistributedIndex_t *>;
static bool ExtractDistributedIndexes ( const StrVec_t &dNames, DistrPtrs_t &dDistributed, CSphString& sMissed )
{
dDistributed.Reset();
dDistributed.Resize( dNames.GetLength () );
dDistributed.ZeroVec ();
ARRAY_FOREACH ( i, dNames )
{
if ( !g_pLocalIndexes->Contains ( dNames[i] ) )
{
// search amongst distributed and copy for further processing
dDistributed[i] = GetDistr ( dNames[i] );
if ( !dDistributed[i] )
{
sMissed = dNames[i];
return false;
}
dDistributed[i]->AddRef ();
}
}
return true;
}
void HandleCommandUpdate ( ISphOutputBuffer & tOut, int iVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( iVer, VER_COMMAND_UPDATE, tOut ) )
return;
// parse request
CSphString sIndexes = tReq.GetString ();
AttrUpdateSharedPtr_t pUpd { new CSphAttrUpdate };
CSphAttrUpdate& tUpd = *pUpd;
CSphVector<DWORD> dMva;
tUpd.m_dAttributes.Resize ( tReq.GetDword() ); // FIXME! check this
if ( iVer>=0x103 )
tUpd.m_bIgnoreNonexistent = ( tReq.GetDword() & 1 )!=0;
bool bBlobUpdate = false;
for ( auto & i : tUpd.m_dAttributes )
{
i.m_sName = tReq.GetString();
if ( i.m_sName==sphGetDocidName() )
return SendErrorReply ( tOut, "'id' attribute cannot be updated" );
i.m_eType = SPH_ATTR_INTEGER;
if ( iVer>=0x102 )
{
auto eUpdate = (UpdateType_e)tReq.GetDword();
switch ( eUpdate )
{
case UPDATE_MVA32:
i.m_eType = SPH_ATTR_UINT32SET;
bBlobUpdate = true;
break;
case UPDATE_STRING:
case UPDATE_JSON:
i.m_eType = SPH_ATTR_STRING;
bBlobUpdate = true;
break;
default:
break;
}
}
}
int iNumUpdates = tReq.GetInt (); // FIXME! check this
tUpd.m_dDocids.Reserve ( iNumUpdates );
tUpd.m_dRowOffset.Reserve ( iNumUpdates );
for ( int i=0; i<iNumUpdates; ++i )
{
// v.1.0 always sends 32-bit ids; v.1.1+ always sends 64-bit ones
uint64_t uDocid = ( iVer>=0x101 ) ? tReq.GetUint64 () : tReq.GetDword ();
tUpd.m_dDocids.Add ( uDocid );
tUpd.m_dRowOffset.Add ( tUpd.m_dPool.GetLength() );
for ( const auto & iAttr : tUpd.m_dAttributes )
{
switch ( iAttr.m_eType )
{
case SPH_ATTR_UINT32SET:
{
DWORD uCount = tReq.GetDword ();
if ( !uCount )
{
tUpd.m_dPool.Add ( 0 );
continue;
}
dMva.Resize ( uCount );
for ( DWORD j=0; j<uCount; j++ )
dMva[j] = tReq.GetDword();
dMva.Uniq(); // don't need dupes within MVA
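// expand each 32-bit value into a 64-bit pool entry (two DWORD slots) to
// match the mva64 pool layout; e.g. values {3,5} take four pool DWORDs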
tUpd.m_dPool.Add ( dMva.GetLength()*2 );
ARRAY_FOREACH ( j, dMva )
{
*(int64_t*)tUpd.m_dPool.AddN(2) = dMva[j]; // dummy expander mva32 -> mva64
}
}
break;
case SPH_ATTR_STRING:
{
DWORD uLen = tReq.GetDword();
tUpd.m_dPool.Add ( tUpd.m_dBlobs.GetLength() );
tUpd.m_dPool.Add ( uLen );
if ( uLen )
{
// extra zeroes for json parser
BYTE * pAdded = tUpd.m_dBlobs.AddN ( uLen+2 );
if ( !tReq.GetBytes ( pAdded, uLen ) )
return SendErrorReply ( tOut, "error reading string" );
pAdded[uLen] = 0;
pAdded[uLen+1] = 0;
}
}
break;
default:
tUpd.m_dPool.Add ( tReq.GetDword() );
break;
}
}
}
if ( tReq.GetError() )
return SendErrorReply ( tOut, "invalid or truncated request" );
// check index names
StrVec_t dIndexNames;
ParseIndexList ( sIndexes, dIndexNames );
if ( dIndexNames.IsEmpty() )
return SendErrorReply ( tOut, "no valid tables in update request" );
DistrPtrs_t dDistributed;
// copy distributed indexes description
CSphString sMissed;
if ( !ExtractDistributedIndexes ( dIndexNames, dDistributed, sMissed ) )
return SendErrorReply ( tOut, "unknown table '%s' in update request", sMissed.cstr() );
// do update
SearchFailuresLog_c dFails;
int iSuccesses = 0;
int iUpdated = 0;
ARRAY_FOREACH ( iIdx, dIndexNames )
{
const CSphString & sReqIndex = dIndexNames[iIdx];
auto pLocal = GetServed ( sReqIndex );
if ( pLocal )
{
DoCommandUpdate ( sReqIndex, pLocal->m_sCluster, nullptr, pUpd, bBlobUpdate, iSuccesses, iUpdated, dFails );
} else if ( dDistributed[iIdx] )
{
auto * pDist = dDistributed[iIdx];
assert ( !pDist->IsEmpty() );
for ( const CSphString & sLocal : pDist->m_dLocal )
{
auto pServed = GetServed ( sLocal );
if ( !pServed )
continue;
DoCommandUpdate ( sLocal, pServed->m_sCluster, sReqIndex.cstr(), pUpd, bBlobUpdate, iSuccesses, iUpdated, dFails );
}
// update remote agents
if ( !dDistributed[iIdx]->m_dAgents.IsEmpty() )
{
VecRefPtrsAgentConn_t dAgents;
pDist->GetAllHosts ( dAgents );
// connect to remote agents and query them
UpdateRequestBuilder_c tReqBuilder ( pUpd );
UpdateReplyParser_c tParser ( &iUpdated );
iSuccesses += PerformRemoteTasks ( dAgents, &tReqBuilder, &tParser );
}
}
}
// serve reply to client
StringBuilder_c sReport;
dFails.BuildReport ( sReport );
if ( !iSuccesses )
return SendErrorReply ( tOut, "%s", sReport.cstr() );
auto tReply = APIAnswer ( tOut, VER_COMMAND_UPDATE, dFails.IsEmpty() ? SEARCHD_OK : SEARCHD_WARNING );
if ( !dFails.IsEmpty() )
tOut.SendString ( sReport.cstr () );
tOut.SendInt ( iUpdated );
}
//////////////////////////////////////////////////////////////////////////
// STATUS HANDLER
//////////////////////////////////////////////////////////////////////////
void BuildStatus ( VectorLike & dStatus )
{
auto & g_tStats = gStats ();
const char * OFF = "OFF";
const int64_t iQueriesDiv = Max ( g_tStats.m_iQueries.load ( std::memory_order_relaxed ), 1 );
const int64_t iDistQueriesDiv = Max ( g_tStats.m_iDistQueries.load ( std::memory_order_relaxed ), 1 );
const int64_t iDiv1000 = iQueriesDiv * 1000;
const int64_t iDDiv1000 = iDistQueriesDiv * 1000;
dStatus.SetColName ( "Counter" );
// FIXME? non-transactional!!!
dStatus.MatchTupletf ( "uptime", "%u", (DWORD) time ( nullptr )-g_tStats.m_uStarted );
dStatus.MatchTupletf ( "connections", "%l", g_tStats.m_iConnections.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "maxed_out", "%l", g_tStats.m_iMaxedOut.load ( std::memory_order_relaxed ) );
dStatus.MatchTuplet ( "version" , g_sStatusVersion.cstr() );
dStatus.MatchTuplet ( "mysql_version", g_sMySQLVersion.cstr() );
for ( auto i=0; i<SEARCHD_COMMAND_TOTAL; ++i)
{
if ( i==SEARCHD_COMMAND_UNUSED_6 )
continue;
dStatus.MatchTupletf ( szCommand ( i ), "%l", g_tStats.m_iCommandCount[i].load ( std::memory_order_relaxed ) );
}
FormatCmdStats ( dStatus, "insert_replace", SearchdStats_t::eReplace );
FormatCmdStats ( dStatus, "search", SearchdStats_t::eSearch );
FormatCmdStats ( dStatus, "update", SearchdStats_t::eUpdate );
auto iConnects = g_tStats.m_iAgentConnectTFO.load ( std::memory_order_relaxed )
+g_tStats.m_iAgentConnect.load ( std::memory_order_relaxed );
dStatus.MatchTupletf ( "agent_connect", "%l", iConnects );
dStatus.MatchTupletf ( "agent_tfo", "%l", g_tStats.m_iAgentConnectTFO.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "agent_retry", "%l", g_tStats.m_iAgentRetry.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "queries", "%l", g_tStats.m_iQueries.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "dist_queries", "%l", g_tStats.m_iDistQueries.load ( std::memory_order_relaxed ) );
// status of thread pool
dStatus.MatchTupletf ( "workers_total", "%d", GlobalWorkPool ()->WorkingThreads () );
dStatus.MatchTupletf ( "workers_active", "%d", myinfo::CountTasks () );
dStatus.MatchTupletf ( "workers_clients", "%d", myinfo::CountClients () );
dStatus.MatchTupletf ( "workers_clients_vip", "%u", session::GetVips() );
dStatus.MatchTupletf ( "workers_clients_buddy", "%u", session::GetBuddyCount() );
dStatus.MatchTupletf ( "work_queue_length", "%d", GlobalWorkPool ()->Works () );
dStatus.MatchTupletf ( "load", "%0.2f %0.2f %0.2f", g_tStat1m.Value(), g_tStat5m.Value(), g_tStat15m.Value() );
dStatus.MatchTupletf ( "load_primary", "%0.2f %0.2f %0.2f", g_tPriStat1m.Value(), g_tPriStat5m.Value(), g_tPriStat15m.Value() );
dStatus.MatchTupletf ( "load_secondary", "%0.2f %0.2f %0.2f", g_tSecStat1m.Value(), g_tSecStat5m.Value(), g_tSecStat15m.Value() );
// macro defined in fileio.h
#if TRACE_UNZIP
{
StringBuilder_c sstat {", "};
auto& stats = CSphReader::GetStat32();
for ( const auto& stat : stats )
sstat << stat.load(std::memory_order_relaxed);
dStatus.MatchTupletf ( "unzip32_hist", "%s", sstat.cstr() );
}
{
StringBuilder_c sstat { ", " };
auto& stats = CSphReader::GetStat64();
for ( const auto& stat : stats )
sstat << stat.load ( std::memory_order_relaxed );
dStatus.MatchTupletf ( "unzip64_hist", "%s", sstat.cstr() );
}
#endif
assert ( g_pDistIndexes );
auto pDistSnapshot = g_pDistIndexes->GetHash();
for ( auto& tIt : *pDistSnapshot )
{
const char * sIdx = tIt.first.cstr();
const auto& dAgents = tIt.second->m_dAgents;
StringBuilder_c sKey;
ARRAY_FOREACH ( i, dAgents )
{
MultiAgentDescRefPtr_c pMultiAgent = dAgents[i];
MultiAgentDesc_c& dMultiAgent = *pMultiAgent;
ARRAY_FOREACH ( j, dMultiAgent )
{
const auto pMetrics = dMultiAgent[j].m_pMetrics;
for ( int k = 0; k<eMaxAgentStat; ++k )
{
sKey.Clear();
sKey.Sprintf ( "ag_%s_%d_%d_%s", sIdx, i+1, j+1, sAgentStatsNames[k] );
dStatus.MatchTupletf ( sKey.cstr (), "%l", pMetrics->m_dCounters[k].load (std::memory_order_relaxed ) );
}
for ( int k = 0; k<ehMaxStat; ++k )
{
sKey.Clear ();
sKey.Sprintf ( "ag_%s_%d_%d_%s", sIdx, i+1, j+1, sAgentStatsNames[eMaxAgentStat+k] );
const char * sFmt = ( k==ehTotalMsecs || k==ehAverageMsecs || k==ehMaxMsecs ) ? "%0.3F" : "%l";
dStatus.MatchTupletf ( sKey.cstr (), sFmt, pMetrics->m_dMetrics[k] );
}
}
}
}
dStatus.MatchTupletf ( "query_wall", "%0.3F", g_tStats.m_iQueryTime.load ( std::memory_order_relaxed ) / 1000 );
if ( g_bCpuStats )
dStatus.MatchTupletf ( "query_cpu", "%0.3F", g_tStats.m_iQueryCpuTime.load ( std::memory_order_relaxed ) / 1000 );
else
dStatus.MatchTuplet ( "query_cpu", OFF);
dStatus.MatchTupletf ( "dist_wall", "%0.3F", g_tStats.m_iDistWallTime.load ( std::memory_order_relaxed ) / 1000 );
dStatus.MatchTupletf ( "dist_local", "%0.3F", g_tStats.m_iDistLocalTime.load ( std::memory_order_relaxed ) / 1000 );
dStatus.MatchTupletf ( "dist_wait", "%0.3F", g_tStats.m_iDistWaitTime.load ( std::memory_order_relaxed ) / 1000 );
if ( g_bIOStats )
{
dStatus.MatchTupletf ( "query_reads", "%l", g_tStats.m_iDiskReads.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "query_readkb", "%l", g_tStats.m_iDiskReadBytes.load ( std::memory_order_relaxed )/ 1024 );
dStatus.MatchTupletf ( "query_readtime", "%l", g_tStats.m_iDiskReadTime.load ( std::memory_order_relaxed ) );
} else
{
dStatus.MatchTuplet ( "query_reads", OFF );
dStatus.MatchTuplet ( "query_readkb", OFF );
dStatus.MatchTuplet ( "query_readtime", OFF );
}
if ( g_tStats.m_iPredictedTime.load ( std::memory_order_relaxed )
|| g_tStats.m_iAgentPredictedTime.load ( std::memory_order_relaxed ) )
{
dStatus.MatchTupletf ( "predicted_time", "%l", g_tStats.m_iPredictedTime.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "dist_predicted_time", "%l", g_tStats.m_iAgentPredictedTime.load ( std::memory_order_relaxed ) );
}
dStatus.MatchTupletf ( "avg_query_wall", "%0.3F", g_tStats.m_iQueryTime.load ( std::memory_order_relaxed ) / iDiv1000 );
if ( g_bCpuStats )
dStatus.MatchTupletf ( "avg_query_cpu", "%0.3F", g_tStats.m_iQueryCpuTime.load ( std::memory_order_relaxed ) / iDiv1000 );
else
dStatus.MatchTuplet ( "avg_query_cpu", OFF );
dStatus.MatchTupletf ( "avg_dist_wall", "%0.3F", g_tStats.m_iDistWallTime.load ( std::memory_order_relaxed ) / iDDiv1000 );
dStatus.MatchTupletf ( "avg_dist_local", "%0.3F", g_tStats.m_iDistLocalTime.load ( std::memory_order_relaxed ) / iDDiv1000 );
dStatus.MatchTupletf ( "avg_dist_wait", "%0.3F", g_tStats.m_iDistWaitTime.load ( std::memory_order_relaxed ) / iDDiv1000 );
if ( g_bIOStats )
{
dStatus.MatchTupletf ( "avg_query_reads", "%0.1F", g_tStats.m_iDiskReads.load ( std::memory_order_relaxed ) * 10 / iQueriesDiv );
dStatus.MatchTupletf ( "avg_query_readkb", "%0.1F", g_tStats.m_iDiskReadBytes.load ( std::memory_order_relaxed ) * 10 / (iQueriesDiv*1024) );
dStatus.MatchTupletf ( "avg_query_readtime", "%0.3F", g_tStats.m_iDiskReadTime.load ( std::memory_order_relaxed ) / iDiv1000 );
} else
{
dStatus.MatchTuplet ( "avg_query_reads", OFF );
dStatus.MatchTuplet ( "avg_query_readkb", OFF );
dStatus.MatchTuplet ( "avg_query_readtime", OFF );
}
const QcacheStatus_t & s = QcacheGetStatus();
dStatus.MatchTupletf ( "qcache_max_bytes", "%l", s.m_iMaxBytes );
dStatus.MatchTupletf ( "qcache_thresh_msec", "%d", s.m_iThreshMs );
dStatus.MatchTupletf ( "qcache_ttl_sec", "%d", s.m_iTtlS );
dStatus.MatchTupletf ( "qcache_cached_queries", "%d", s.m_iCachedQueries );
dStatus.MatchTupletf ( "qcache_used_bytes", "%l", s.m_iUsedBytes );
dStatus.MatchTupletf ( "qcache_hits", "%l", s.m_iHits );
// clusters
ReplicateClustersStatus ( dStatus );
}
// this is returned for the MySQL 'statistics' command ('status' in the mysql CLI)
void BuildStatusOneline ( StringBuilder_c & sOut )
{
auto iThreads = GlobalWorkPool ()->WorkingThreads ();
auto tSample = GlobalWorkPool()->Tasks();
auto tCurrent = GlobalWorkPool()->CurTasks();
auto iQueue = tSample.iPri + tSample.iSec + tCurrent;
auto iTasks = myinfo::CountTasks ();
auto & g_tStats = gStats ();
sOut.StartBlock ( " " );
sOut
<< "Uptime:" << (DWORD) time ( NULL )-g_tStats.m_uStarted
<< " Threads:" << iThreads;
sOut.Sprintf (" Queue now+pri+sec=total: %d+%d+%d=%d", tCurrent, tSample.iPri, tSample.iSec, iQueue );
sOut
<< " Clients:" << myinfo::CountClients()
<< " Vip clients:" << session::GetVips()
<< " Buddy clients:" << session::GetBuddyCount()
<< " Tasks:" << iTasks
<< " Queries:" << g_tStats.m_iQueries.load ( std::memory_order_relaxed );
sOut.Sprintf ( " Wall: %t", (int64_t)g_tStats.m_iQueryTime.load ( std::memory_order_relaxed ) );
sOut.Sprintf ( " CPU: %t", (int64_t)g_tStats.m_iQueryCpuTime.load ( std::memory_order_relaxed ) );
sOut.Sprintf ( "\nQueue/Th: %0.1F%", iQueue * 10 / iThreads );
sOut.Sprintf ( " Tasks/Th: %0.1F%", iTasks * 10 / iThreads );
sOut.Sprintf ( "\nLoad average: %0.2f, %0.2f, %0.2f", g_tStat1m.Value(), g_tStat5m.Value(), g_tStat15m.Value() );
}
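// illustrative output (values made up, spacing approximate):
// Uptime: 3600 Threads: 8 Queue now+pri+sec=total: 0+1+0=1 Clients: 2 Vip clients: 0 Buddy clients: 1 Tasks: 3 Queries: 12345 ...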
void BuildOneAgentStatus ( VectorLike & dStatus, HostDashboardRefPtr_t pDash, const char * sPrefix="agent" )
{
assert ( pDash );
{
ScRL_t tGuard ( pDash->m_dMetricsLock );
if ( dStatus.MatchAddf ( "%s_hostname", sPrefix ) )
dStatus.Add ( pDash->m_tHost.GetMyUrl ().cstr () );
if ( dStatus.MatchAddf ( "%s_references", sPrefix ) )
dStatus.Addf( "%d", (int) pDash->GetRefcount()-1 ); // -1 since we currently also 'use' the agent, reading it's stats
if ( dStatus.MatchAddf ( "%s_ping", sPrefix ) )
dStatus.Add ( pDash->m_iNeedPing ? "yes" : "no" );
if ( dStatus.MatchAddf ( "%s_has_perspool", sPrefix ) )
dStatus.Add ( pDash->m_pPersPool ? "yes" : "no" );
if ( dStatus.MatchAddf ( "%s_need_resolve", sPrefix ) )
dStatus.Add ( pDash->m_tHost.m_bNeedResolve ? "yes" : "no" );
uint64_t iCur = sphMicroTimer();
uint64_t iLastAccess = iCur - pDash->m_iLastQueryTime;
if ( dStatus.MatchAddf ( "%s_lastquery", sPrefix ) )
dStatus.Addf ( "%.2F", iLastAccess / 10000 );
iLastAccess = iCur - pDash->m_iLastAnswerTime;
if ( dStatus.MatchAddf ( "%s_lastanswer", sPrefix ) )
dStatus.Addf ( "%.2F", iLastAccess / 10000 );
uint64_t iLastTimer = pDash->m_iLastAnswerTime-pDash->m_iLastQueryTime;
if ( dStatus.MatchAddf ( "%s_lastperiodmsec", sPrefix ) )
dStatus.Addf ( "%.3D", iLastTimer );
if ( dStatus.MatchAddf ( "%s_pingtripmsec", sPrefix ) )
dStatus.Addf ( "%.3F", pDash->m_uPingTripUS );
if ( dStatus.MatchAddf ( "%s_errorsarow", sPrefix ) )
dStatus.Addf ( "%l", pDash->m_iErrorsARow );
}
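// walk the aggregation windows: 1 period, then 5, then the full
// STATS_DASH_PERIODS; the loop below steps iPeriods through these values and
// stops once the full window has been reported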
int iPeriods = 1;
while ( iPeriods>0 )
{
HostMetricsSnapshot_t dMetricsSnapshot;
pDash->GetCollectedMetrics ( dMetricsSnapshot, iPeriods );
{
for ( int j = 0; j<ehMaxStat+eMaxAgentStat; ++j )
// hack: avoid microseconds in human-readable statistics
if ( j==ehTotalMsecs && dStatus.MatchAddf ( "%s_%dperiods_msecsperquery", sPrefix, iPeriods ) )
{
if ( dMetricsSnapshot[ehConnTries]>0 )
dStatus.Addf ( "%.2F", dMetricsSnapshot[ehTotalMsecs] / dMetricsSnapshot[ehConnTries] / 10 );
else
dStatus.Add ( "n/a" );
} else if ( dStatus.MatchAddf ( "%s_%dperiods_%s", sPrefix, iPeriods, sAgentStatsNames[j] ) )
{
if ( j==ehMaxMsecs || j==ehAverageMsecs )
dStatus.Addf ( "%.2F", dMetricsSnapshot[j] / 10 );
else
dStatus.Addf ( "%l", dMetricsSnapshot[j] );
}
}
if ( iPeriods==1 )
iPeriods = 5;
else if ( iPeriods==5 )
iPeriods = STATS_DASH_PERIODS;
else if ( iPeriods==STATS_DASH_PERIODS )
iPeriods = -1;
}
}
static bool BuildDistIndexStatus ( VectorLike & dStatus, const CSphString& sIndex )
{
auto pDistr = GetDistr ( sIndex );
if ( !pDistr )
return false;
ARRAY_FOREACH ( i, pDistr->m_dLocal )
{
if ( dStatus.MatchAddf ( "dstindex_local_%d", i+1 ) )
dStatus.Add ( pDistr->m_dLocal[i].cstr() );
}
CSphString sKey;
ARRAY_FOREACH ( i, pDistr->m_dAgents )
{
MultiAgentDescRefPtr_c pAgents = pDistr->m_dAgents[i];
const MultiAgentDesc_c& tAgents = *pAgents;
if ( dStatus.MatchAddf ( "dstindex_%d_is_ha", i+1 ) )
dStatus.Add ( tAgents.IsHA()? "1": "0" );
auto dWeights = tAgents.GetWeights ();
ARRAY_FOREACH ( j, tAgents )
{
if ( tAgents.IsHA() )
sKey.SetSprintf ( "dstindex_%dmirror%d", i+1, j+1 );
else
sKey.SetSprintf ( "dstindex_%dagent", i+1 );
const AgentDesc_t & dDesc = tAgents[j];
if ( dStatus.MatchAddf ( "%s_id", sKey.cstr () ) )
dStatus.Addf ( "%s:%s", dDesc.GetMyUrl ().cstr (), dDesc.m_sIndexes.cstr () );
if ( tAgents.IsHA() && dStatus.MatchAddf ( "%s_probability_weight", sKey.cstr () ) )
dStatus.Addf ( "%0.2f%%", dWeights[j] );
if ( dStatus.MatchAddf ( "%s_is_blackhole", sKey.cstr () ) )
dStatus.Add ( dDesc.m_bBlackhole ? "1" : "0" );
if ( dStatus.MatchAddf ( "%s_is_persistent", sKey.cstr () ) )
dStatus.Add ( dDesc.m_bPersistent ? "1" : "0" );
}
}
return true;
}
void BuildAgentStatus ( VectorLike &dStatus, const CSphString& sIndexOrAgent )
{
if ( !sIndexOrAgent.IsEmpty() )
{
if ( !BuildDistIndexStatus ( dStatus, sIndexOrAgent ) )
{
auto pAgent = Dashboard::FindAgent ( sIndexOrAgent );
if ( pAgent )
BuildOneAgentStatus ( dStatus, pAgent );
else
dStatus.MatchTupletf ( "status_error", "No such distributed table or agent: %s", sIndexOrAgent.cstr () );
}
return;
}
dStatus.SetColName ( "Key" );
dStatus.MatchTupletf( "status_period_seconds", "%d", g_uHAPeriodKarmaS );
dStatus.MatchTupletf ( "status_stored_periods", "%d", STATS_DASH_PERIODS );
auto dDashes = Dashboard::GetActiveHosts();
CSphString sPrefix;
ARRAY_FOREACH ( i, dDashes )
{
sPrefix.SetSprintf ( "ag_%d", i );
BuildOneAgentStatus ( dStatus, dDashes[i], sPrefix.cstr() );
}
}
static void AddIOStatsToMeta ( VectorLike & dStatus, const CSphIOStats & tStats, const char * sPrefix )
{
if ( dStatus.MatchAddf ( "%s%s", sPrefix, "io_read_time" ) )
dStatus.Addf( "%.3F", tStats.m_iReadTime);
if ( dStatus.MatchAddf ( "%s%s", sPrefix, "io_read_ops" ) )
dStatus.Addf ( "%u", tStats.m_iReadOps );
if ( dStatus.MatchAddf ( "%s%s", sPrefix, "io_read_kbytes" ) )
dStatus.Addf ( "%d.%d", (int) ( tStats.m_iReadBytes / 1024 ), (int) ( tStats.m_iReadBytes % 1024 ) / 100 );
if ( dStatus.MatchAddf ( "%s%s", sPrefix, "io_write_time" ) )
dStatus.Addf ( "%.3F", tStats.m_iWriteTime );
if ( dStatus.MatchAddf ( "%s%s", sPrefix, "io_write_ops" ) )
dStatus.Addf ( "%u", tStats.m_iWriteOps );
if ( dStatus.MatchAddf ( "%s%s", sPrefix, "io_write_kbytes" ) )
dStatus.Addf ( "%d.%d", (int)( tStats.m_iWriteBytes/1024 ), (int)( tStats.m_iWriteBytes%1024 )/100 );
}
void BuildMeta ( VectorLike & dStatus, const CSphQueryResultMeta & tMeta )
{
if ( !tMeta.m_sError.IsEmpty() )
dStatus.MatchTuplet ( "error", tMeta.m_sError.cstr () );
if ( !tMeta.m_sWarning.IsEmpty() )
dStatus.MatchTuplet ( "warning", tMeta.m_sWarning.cstr () );
dStatus.MatchTupletf ( "total", "%d", tMeta.m_iMatches );
dStatus.MatchTupletf ( "total_found", "%l", tMeta.m_iTotalMatches );
dStatus.MatchTupletf ( "total_relation", "%s", tMeta.m_bTotalMatchesApprox ? "gte" : "eq" );
dStatus.MatchTupletf ( "time", "%.3F", tMeta.m_iQueryTime );
if ( tMeta.m_iMultiplier>1 )
dStatus.MatchTupletf ( "multiplier", "%d", tMeta.m_iMultiplier );
if ( g_bCpuStats )
{
dStatus.MatchTupletf ( "cpu_time", "%.3F", tMeta.m_iCpuTime );
dStatus.MatchTupletf ( "agents_cpu_time", "%.3F", tMeta.m_iAgentCpuTime );
}
if ( g_bIOStats )
{
AddIOStatsToMeta ( dStatus, tMeta.m_tIOStats, "" );
AddIOStatsToMeta ( dStatus, tMeta.m_tAgentIOStats, "agent_" );
}
if ( tMeta.m_bHasPrediction )
{
dStatus.MatchTupletf ( "local_fetched_docs", "%d", tMeta.m_tStats.m_iFetchedDocs );
dStatus.MatchTupletf ( "local_fetched_hits", "%d", tMeta.m_tStats.m_iFetchedHits );
dStatus.MatchTupletf ( "local_fetched_skips", "%d", tMeta.m_tStats.m_iSkips );
dStatus.MatchTupletf ( "predicted_time", "%l", tMeta.m_iPredictedTime );
if ( tMeta.m_iAgentPredictedTime )
dStatus.MatchTupletf ( "dist_predicted_time", "%l", tMeta.m_iAgentPredictedTime );
if ( tMeta.m_iAgentFetchedDocs || tMeta.m_iAgentFetchedHits || tMeta.m_iAgentFetchedSkips )
{
dStatus.MatchTupletf ( "dist_fetched_docs", "%d", tMeta.m_tStats.m_iFetchedDocs+tMeta.m_iAgentFetchedDocs );
dStatus.MatchTupletf ( "dist_fetched_hits", "%d", tMeta.m_tStats.m_iFetchedHits+tMeta.m_iAgentFetchedHits );
dStatus.MatchTupletf ( "dist_fetched_skips", "%d", tMeta.m_tStats.m_iSkips+tMeta.m_iAgentFetchedSkips );
}
}
auto dWords = tMeta.MakeSortedWordStat();
ARRAY_CONSTFOREACH( iWord, dWords )
{
auto * pWord = dWords[iWord];
assert ( pWord );
if ( dStatus.MatchAddf ( "keyword[%d]", iWord ) )
dStatus.Add ( pWord->first );
if ( dStatus.MatchAddf ( "docs[%d]", iWord ) )
dStatus.Addf ( "%l", pWord->second.first );
if ( dStatus.MatchAddf ( "hits[%d]", iWord ) )
dStatus.Addf ( "%l", pWord->second.second );
}
StringBuilder_c sIterators { ", " };
for ( const auto & i : tMeta.m_tIteratorStats.m_dIterators )
sIterators.Appendf ( "%s:%s (%d%%)", i.m_sAttr.cstr(), i.m_sType.cstr(), int(float(i.m_iUsed)/tMeta.m_tIteratorStats.m_iTotal*100.0f) );
if ( !sIterators.IsEmpty() )
dStatus.MatchTuplet ( "index", sIterators.cstr() );
}
void HandleCommandStatus ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_STATUS, tOut ) )
return;
bool bGlobalStat = tReq.GetDword ()!=0;
VectorLike dStatus;
if ( bGlobalStat )
BuildStatus ( dStatus );
else
{
ScRL_t dMetaRlock ( g_tLastMetaLock );
auto & g_tStats = gStats ();
BuildMeta ( dStatus, g_tLastMeta );
if ( g_tStats.m_iPredictedTime.load ( std::memory_order_relaxed )
|| g_tStats.m_iAgentPredictedTime.load ( std::memory_order_relaxed ) )
{
dStatus.MatchTupletf ( "predicted_time", "%l", g_tStats.m_iPredictedTime.load ( std::memory_order_relaxed ) );
dStatus.MatchTupletf ( "dist_predicted_time", "%l", g_tStats.m_iAgentPredictedTime.load ( std::memory_order_relaxed ) );
}
}
auto tReply = APIAnswer ( tOut, VER_COMMAND_STATUS );
tOut.SendInt ( dStatus.GetLength () / dStatus.Header ().GetLength () ); // rows
tOut.SendInt ( dStatus.Header ().GetLength () ); // cols
for ( const auto & dLines : dStatus )
tOut.SendString ( dLines.cstr() );
}
//////////////////////////////////////////////////////////////////////////
// FLUSH HANDLER
//////////////////////////////////////////////////////////////////////////
void HandleCommandFlush ( ISphOutputBuffer & tOut, WORD uVer )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_FLUSHATTRS, tOut ) )
return;
int iTag = CommandFlush ();
// return last flush tag, just for the fun of it
auto tReply = APIAnswer ( tOut, VER_COMMAND_FLUSHATTRS );
tOut.SendInt ( iTag );
}
/////////////////////////////////////////////////////////////////////////////
// GENERAL HANDLER
/////////////////////////////////////////////////////////////////////////////
void HandleCommandSphinxql ( GenericOutputBuffer_c & tOut, WORD uVer, InputBuffer_c & tReq ); // definition is below
void HandleCommandJson ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq );
void StatCountCommand ( SearchdCommand_e eCmd );
void HandleCommandUserVar ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq );
void HandleCommandCallPq ( ISphOutputBuffer &tOut, WORD uVer, InputBuffer_c &tReq );
static void HandleCommandSuggest ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq );
/// ping/pong exchange over API
void HandleCommandPing ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_PING, tOut ) )
return;
// parse ping
int iCookie = tReq.GetInt();
if ( tReq.GetError () )
return;
// reply with a pong carrying the same cookie
auto tReply = APIAnswer ( tOut, VER_COMMAND_PING );
tOut.SendInt ( iCookie ); // echo the cookie back
}
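/// API command dispatcher: fills crash-query info, bumps per-command counters and my-info, then routes the request to the matching handler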
void ExecuteApiCommand ( SearchdCommand_e eCommand, WORD uCommandVer, int iLength, InputBuffer_c & tBuf, GenericOutputBuffer_c & tOut )
{
auto & tSess = session::Info();
tSess.SetTaskState ( TaskState_e::QUERY );
// set on query guard
auto& tCrashQuery = GlobalCrashQueryGetRef();
tCrashQuery.m_eType = QUERY_API;
tCrashQuery.m_dQuery = { tBuf.GetBufferPtr(), iLength };
tCrashQuery.m_uCMD = eCommand;
tCrashQuery.m_uVer = uCommandVer;
// handle known commands
assert ( eCommand<SEARCHD_COMMAND_WRONG );
// count commands
StatCountCommand ( eCommand );
myinfo::SetCommand ( g_dApiCommands[eCommand] );
AT_SCOPE_EXIT ( []() { myinfo::SetCommandDone(); } );
sphLogDebugv ( "conn %s(%d): got command %d, handling", tSess.szClientName(), tSess.GetConnID(), eCommand );
switch ( eCommand )
{
case SEARCHD_COMMAND_SEARCH: HandleCommandSearch ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_EXCERPT: HandleCommandExcerpt ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_KEYWORDS: HandleCommandKeywords ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_UPDATE: HandleCommandUpdate ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_STATUS: HandleCommandStatus ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_FLUSHATTRS:HandleCommandFlush ( tOut, uCommandVer ); break;
case SEARCHD_COMMAND_SPHINXQL: HandleCommandSphinxql ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_JSON: HandleCommandJson ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_PING: HandleCommandPing ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_UVAR: HandleCommandUserVar ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_CALLPQ: HandleCommandCallPq ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_CLUSTER: HandleAPICommandCluster ( tOut, uCommandVer, tBuf, tSess.szClientName() ); break;
case SEARCHD_COMMAND_GETFIELD: HandleCommandGetField ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_SUGGEST: HandleCommandSuggest ( tOut, uCommandVer, tBuf ); break;
case SEARCHD_COMMAND_PERSIST: break; // already processed, here just for stats
default:
SendErrorReply ( tOut, "internal error: unhandled command" );
break;
}
}
void StmtErrorReporter_i::Error ( const char * sTemplate, ... )
{
StringBuilder_c sBuf;
va_list ap;
va_start ( ap, sTemplate );
sBuf.vAppendf ( sTemplate, ap );
va_end ( ap );
ErrorEx ( EMYSQL_ERR::PARSE_ERROR, sBuf.cstr () );
}
class StmtErrorReporter_c final : public StmtErrorReporter_i
{
public:
explicit StmtErrorReporter_c ( RowBuffer_i & tBuffer )
: m_tRowBuffer ( tBuffer )
{}
void Ok ( int iAffectedRows, const CSphString & sWarning, int64_t iLastInsertId ) final
{
m_tRowBuffer.Ok ( iAffectedRows, ( sWarning.IsEmpty() ? 0 : 1 ), nullptr, false, iLastInsertId );
}
void Ok ( int iAffectedRows, int nWarnings ) final
{
m_tRowBuffer.Ok ( iAffectedRows, nWarnings );
}
void ErrorEx ( EMYSQL_ERR iErr, const char * sError ) final
{
m_tRowBuffer.Error ( sError, iErr );
}
RowBuffer_i * GetBuffer() final { return &m_tRowBuffer; }
private:
RowBuffer_i & m_tRowBuffer;
};
struct StringPtrTraits_t
{
CSphVector<BYTE> m_dPackedData;
CSphFixedVector<int> m_dOff { 0 };
CSphVector<BYTE> m_dParserBuf;
// remap offsets to string pointers
void SavePointersTo ( VecTraits_T<const char *> &dStrings, bool bSkipInvalid=true ) const
{
if ( bSkipInvalid )
ARRAY_FOREACH ( i, m_dOff )
{
int iOff = m_dOff[i];
if ( iOff<0 )
continue;
dStrings[i] = ( const char * ) m_dPackedData.Begin () + iOff;
}
else
ARRAY_FOREACH ( i, m_dOff )
{
int iOff = m_dOff[i];
dStrings[i] = ( iOff>=0 ? ( const char * ) m_dPackedData.Begin () + iOff : nullptr );
}
}
void Reset ()
{
m_dPackedData.Resize ( 0 );
m_dParserBuf.Resize ( 0 );
m_dOff.Fill ( -1 );
}
BYTE * ReserveBlob ( int iBlobSize, int iOffset )
{
if ( !iBlobSize )
return nullptr;
m_dOff[iOffset] = m_dPackedData.GetLength ();
BYTE * pPacked = m_dPackedData.AddN ( sphCalcPackedLength(iBlobSize) );
pPacked += ZipToPtrBE ( pPacked, iBlobSize );
return pPacked;
}
};
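/// convert a scalar bson value (int, double or string) into SqlInsert_t; other types are left untouched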
static void BsonToSqlInsert ( const bson::Bson_c& dBson, SqlInsert_t& tAttr )
{
switch ( dBson.GetType () )
{
case JSON_INT32:
case JSON_INT64: tAttr.m_iType = SqlInsert_t::CONST_INT;
tAttr.SetValueInt ( dBson.Int() );
break;
case JSON_DOUBLE: tAttr.m_iType = SqlInsert_t::CONST_FLOAT;
tAttr.m_fVal = float ( dBson.Double () );
break;
case JSON_STRING: tAttr.m_iType = SqlInsert_t::QUOTED_STRING;
tAttr.m_sVal = dBson.String ();
break;
default: break;
}
}
template<typename T>
static int CopyBsonValues ( CSphVector<int64_t> & dMva, const bson::Bson_c & dBson )
{
auto dValues = bson::Vector<T> ( dBson );
int64_t * pDst = dMva.AddN ( dValues.GetLength() );
ARRAY_FOREACH ( i, dValues )
pDst[i] = dValues[i];
return dValues.GetLength();
}
// save bson array to 64 bit mvaint64 mva
static int BsonArrayToMva ( CSphVector<int64_t> & dMva, const bson::Bson_c & dBson )
{
int iOff = dMva.GetLength ();
dMva.Add ();
int iValues = 0;
if ( dBson.GetType ()==JSON_INT64_VECTOR )
iValues = CopyBsonValues<int64_t> ( dMva, dBson );
else if ( dBson.GetType ()==JSON_INT32_VECTOR )
iValues = CopyBsonValues<DWORD> ( dMva, dBson );
else
{ // slowest path - maybe every value needs conversion
bson::BsonIterator_c dIter ( dBson );
iValues = dIter.NumElems();
int64_t * pDst = dMva.AddN(iValues);
for ( ; dIter; dIter.Next () )
*pDst++ = dIter.Int ();
}
if ( !iValues ) // empty mva; discard resize
{
dMva.Resize ( iOff );
return -1;
}
auto pDst = &dMva[iOff + 1];
sphSort ( pDst, iValues );
iValues = sphUniq ( pDst, iValues );
dMva[iOff] = iValues;
dMva.Resize ( iOff + iValues + 1 );
return iOff;
}
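/// unpack one bson document into fields, plain attrs and the string/MVA pools according to the schema hash;
/// returns false only for an empty (null) document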
static bool ParseBsonDocument ( const VecTraits_T<BYTE> & dDoc, const SchemaItemHash_c & tLoc, const CSphString & sIdAlias, int iRow, VecTraits_T<VecTraits_T<const char>>& dFields, CSphMatch & tDoc,
StringPtrTraits_t & tStrings, CSphVector<int64_t> & dMva, const CSphSchema &tSchema, Warner_c & sMsg )
{
using namespace bson;
Bson_c dBson ( dDoc );
if ( dDoc.IsEmpty () )
return false;
CSphString sError;
SqlInsert_t tAttr;
const SchemaItemVariant_t * pId = sIdAlias.IsEmpty () ? nullptr : tLoc.Find ( sphFNV64 ( sIdAlias.cstr() ) );
const CSphColumnInfo * pIdCol = tSchema.GetAttr ( sphGetDocidName () );
assert(pIdCol);
CSphAttrLocator tIdLoc = pIdCol->m_tLocator;
tIdLoc.m_bDynamic = true;
BsonIterator_c dChild ( dBson );
for ( ; dChild; dChild.Next () )
{
CSphString sName = dChild.GetName ();
sphColumnToLowercase ( const_cast<char *>( sName.cstr() ) );
const SchemaItemVariant_t * pItem = tLoc.Find ( sphFNV64 ( sName.cstr() ) );
// FIXME!!! warn on unknown JSON fields
if ( pItem )
{
if ( pItem->m_iField!=-1 && dChild.IsString () )
{
// stripper prior to build hits does not preserve field length,
// but works with \0-terminated strings and may walk the whole document, modifying it and altering field lengths
const VecTraits_T<const char> tField = Vector<const char> ( dChild );
if ( tField.GetLength() )
{
int64_t iOff = tStrings.m_dPackedData.GetLength();
// copy field content with tail zeroes
BYTE * pDst = tStrings.m_dPackedData.AddN ( tField.GetLength() + 1 + CSphString::GetGap() );
memcpy ( pDst, tField.Begin(), tField.GetLength() );
memset ( pDst + tField.GetLength(), 0, 1 + CSphString::GetGap() );
// pack offset into pointer, then restore the pointer after m_dPackedData is filled
dFields[pItem->m_iField] = VecTraits_T<const char> ( (const char *)iOff, tField.GetLength() );
} else
{
dFields[pItem->m_iField] = tField;
}
if ( pItem==pId )
sMsg.Warn ( "field '%s' requested as docs_id identifier, but it is field!", sName.cstr() );
} else
{
BsonToSqlInsert ( dChild, tAttr );
CSphMatchVariant::SetAttr ( tDoc, pItem->m_tLoc, &sName, tAttr, pItem->m_eType, false, sError );
if ( pId==pItem )
tDoc.SetAttr ( tIdLoc, (DocID_t)dChild.Int() );
switch ( pItem->m_eType )
{
case SPH_ATTR_JSON:
assert ( pItem->m_iStr!=-1 );
{
if ( dChild.IsAssoc() || dChild.IsArray() )
{
// just save bson blob
BYTE * pDst = tStrings.ReserveBlob ( dChild.StandaloneSize(), pItem->m_iStr );
dChild.BsonToBson ( pDst );
} else
{
sMsg.Warn ( "JSON item (%s) should be object or array, got=%s", sName.cstr(), JsonTypeName ( dChild.GetType() ) );
}
}
break;
case SPH_ATTR_STRING:
assert ( pItem->m_iStr!=-1 );
{
auto dStrBlob = RawBlob ( dChild );
if ( dStrBlob.second )
{
tStrings.m_dOff[pItem->m_iStr] = tStrings.m_dPackedData.GetLength ();
BYTE * sDst = tStrings.m_dPackedData.AddN ( 1 + dStrBlob.second + CSphString::GetGap () );
memcpy ( sDst, dStrBlob.first, dStrBlob.second );
memset ( sDst + dStrBlob.second, 0, 1 + CSphString::GetGap () );
}
}
break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
assert ( pItem->m_iMva!=-1 );
if ( dChild.IsArray() )
{
int iOff = BsonArrayToMva ( dMva, dChild );
if ( iOff>=0 )
dMva[pItem->m_iMva] = iOff;
} else
{
sMsg.Warn ( "MVA item (%s) should be array, got %s", sName.cstr(), JsonTypeName ( dChild.GetType() ) );
}
default:
break;
}
}
} else if ( !sIdAlias.IsEmpty() && sIdAlias==sName )
{
((CSphMatch &)tDoc).SetAttr ( tIdLoc, (DocID_t)dChild.Int() );
}
}
return true;
}
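/// builds CALL PQ requests for remote agents; in sparse mode each agent receives its own chunk of docs, otherwise every agent gets them all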
class PqRequestBuilder_c : public RequestBuilder_i
{
const BlobVec_t &m_dDocs;
const PercolateOptions_t &m_tOpts;
mutable std::atomic<int> m_iWorker {0};
int m_iStart;
int m_iStep;
public:
explicit PqRequestBuilder_c ( const BlobVec_t &dDocs, const PercolateOptions_t &tOpts, int iStart=0, int iStep=0 )
: m_dDocs ( dDocs )
, m_tOpts ( tOpts )
, m_iStart ( iStart )
, m_iStep ( iStep)
{}
void BuildRequest ( const AgentConn_t &tAgent, ISphOutputBuffer &tOut ) const final
{
// it sends either all docs to each agent, or a chunk of docs to the current agent
auto iWorker = tAgent.m_iStoreTag;
if ( iWorker<0 )
{
iWorker = m_iWorker.fetch_add ( 1, std::memory_order_relaxed );
tAgent.m_iStoreTag = iWorker;
}
const char * sIndex = tAgent.m_tDesc.m_sIndexes.cstr ();
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_CALLPQ, VER_COMMAND_CALLPQ );
DWORD uFlags = 0;
if ( m_tOpts.m_bGetDocs )
uFlags = 1;
if ( m_tOpts.m_bGetQuery )
uFlags |= 2;
if ( m_tOpts.m_bJsonDocs )
uFlags |= 4;
if ( m_tOpts.m_bVerbose )
uFlags |= 8;
if ( m_tOpts.m_bSkipBadJson )
uFlags |= 16;
tOut.SendDword ( uFlags );
tOut.SendString ( m_tOpts.m_sIdAlias.cstr () );
tOut.SendString ( sIndex );
// send docs (all or chunk)
int iStart = 0;
int iStep = m_dDocs.GetLength();
if ( m_iStep ) // sparsed case, calculate the interval.
{
iStart = m_iStart + m_iStep * iWorker;
iStep = Min ( iStep - iStart, m_iStep );
}
tOut.SendInt ( iStart );
tOut.SendInt ( iStep );
for ( int i=iStart; i<iStart+iStep; ++i)
tOut.SendArray ( m_dDocs[i] );
}
};
class PqReplyParser_c : public ReplyParser_i
{
public:
bool ParseReply ( MemInputBuffer_c &tReq, AgentConn_t &tAgent ) const final
{
// auto &dQueries = m_pWorker->m_dQueries;
// int iDoc = m_pWorker->m_dTasks[tAgent.m_iStoreTag].m_iHead;
if ( !tAgent.m_pResult )
tAgent.m_pResult = std::make_unique<CPqResult>();
auto pResult = (CPqResult*)tAgent.m_pResult.get();
auto &dResult = pResult->m_dResult;
auto uFlags = tReq.GetDword ();
bool bDumpDocs = !!(uFlags & 1U);
bool bQuery = !!(uFlags & 2U);
bool bDeduplicatedDocs = !!(uFlags & 4U);
dResult.m_bGetDocs = bDumpDocs;
dResult.m_bGetQuery = bQuery;
CSphVector<int> dDocs;
CSphVector<DocID_t> dDocids;
dDocids.Add(0); // keep docids 1-based, which simplifies processing by avoiding extra checks.
int iRows = tReq.GetInt ();
dResult.m_dQueryDesc.Reset ( iRows );
for ( auto &tDesc : dResult.m_dQueryDesc )
{
tDesc.m_iQUID = tReq.GetUint64 ();
if ( bDumpDocs )
{
int iCount = tReq.GetInt ();
dDocs.Add ( iCount );
if ( bDeduplicatedDocs )
{
for ( int iDoc = 0; iDoc<iCount; ++iDoc )
{
dDocs.Add ( dDocids.GetLength () );
dDocids.Add ( ( int64_t ) tReq.GetUint64 () );
}
} else
{
for ( int iDoc = 0; iDoc<iCount; ++iDoc )
dDocs.Add ( tReq.GetInt () );
}
}
if ( bQuery )
{
auto uDescFlags = tReq.GetDword ();
if ( uDescFlags & 1U )
tDesc.m_sQuery = tReq.GetString ();
if ( uDescFlags & 2U )
tDesc.m_sTags = tReq.GetString ();
if ( uDescFlags & 4U )
tDesc.m_sFilters = tReq.GetString ();
tDesc.m_bQL = !!(uDescFlags & 8U);
}
}
// meta
dResult.m_tmTotal = tReq.GetUint64 ();
dResult.m_tmSetup = tReq.GetUint64 ();
dResult.m_iQueriesMatched = tReq.GetInt();
dResult.m_iQueriesFailed = tReq.GetInt ();
dResult.m_iDocsMatched = tReq.GetInt ();
dResult.m_iTotalQueries = tReq.GetInt ();
dResult.m_iOnlyTerms = tReq.GetInt ();
dResult.m_iEarlyOutQueries = tReq.GetInt ();
auto iDts = tReq.GetInt();
dResult.m_dQueryDT.Reset ( iDts );
for ( int& iDt : dResult.m_dQueryDT )
iDt = tReq.GetInt();
dResult.m_sMessages.Warn ( tReq.GetString () );
auto iDocs = dDocs.GetLength ();
dResult.m_dDocs.Set ( dDocs.LeakData (), iDocs );
if ( dDocids.GetLength()>1 )
{
iDocs = dDocids.GetLength ();
pResult->m_dDocids.Set ( dDocids.LeakData (), iDocs );
}
return true;
}
};
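/// serialize CPqResult over the API wire (the counterpart of PqReplyParser_c::ParseReply)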
static void SendAPIPercolateReply ( ISphOutputBuffer & tOut, const CPqResult & tResult, int iShift=0 )
{
auto tReply = APIAnswer ( tOut, VER_COMMAND_CALLPQ );
CSphVector<int64_t> dTmpDocs;
int iDocOff = -1;
const PercolateMatchResult_t &tRes = tResult.m_dResult;
const CSphFixedVector<DocID_t> &dDocids = tResult.m_dDocids;
bool bHasDocids = !dDocids.IsEmpty ();
bool bDumpDocs = tRes.m_bGetDocs;
bool bQuery = tRes.m_bGetQuery;
DWORD uFlags = 0;
if ( bDumpDocs )
uFlags = 1;
if ( bQuery )
uFlags |=2;
if ( bHasDocids )
uFlags |=4;
tOut.SendDword ( uFlags );
tOut.SendInt ( tRes.m_dQueryDesc.GetLength () );
for ( const auto &tDesc : tRes.m_dQueryDesc )
{
tOut.SendUint64 ( tDesc.m_iQUID );
if ( bDumpDocs )
{
// document count + document id(s)
auto iCount = ( int ) ( tRes.m_dDocs[++iDocOff] );
if ( bHasDocids ) // need de-duplicate docs
{
dTmpDocs.Resize ( iCount );
for ( int iDoc = 0; iDoc<iCount; ++iDoc )
{
int iRow = tRes.m_dDocs[++iDocOff];
dTmpDocs[iDoc] = dDocids[iRow];
}
dTmpDocs.Uniq ();
tOut.SendInt ( dTmpDocs.GetLength());
for ( auto dTmpDoc : dTmpDocs )
tOut.SendUint64 ( dTmpDoc );
} else
{
tOut.SendInt ( iCount );
for ( int iDoc = 0; iDoc<iCount; ++iDoc )
tOut.SendInt ( iShift+tRes.m_dDocs[++iDocOff] );
}
}
if ( bQuery )
{
DWORD uDescFlags = 0;
if ( !tDesc.m_sQuery.IsEmpty ())
uDescFlags |=1;
if ( !tDesc.m_sTags.IsEmpty () )
uDescFlags |= 2;
if ( !tDesc.m_sFilters.IsEmpty () )
uDescFlags |= 4;
if ( tDesc.m_bQL )
uDescFlags |= 8;
tOut.SendDword ( uDescFlags );
if ( uDescFlags & 1 )
tOut.SendString ( tDesc.m_sQuery.cstr () );
if ( uDescFlags & 2 )
tOut.SendString ( tDesc.m_sTags.cstr () );
if ( uDescFlags & 4 )
tOut.SendString ( tDesc.m_sFilters.cstr () );
}
}
// send meta
tOut.SendUint64 ( tRes.m_tmTotal );
tOut.SendUint64 ( tRes.m_tmSetup );
tOut.SendInt ( tRes.m_iQueriesMatched );
tOut.SendInt ( tRes.m_iQueriesFailed );
tOut.SendInt ( tRes.m_iDocsMatched );
tOut.SendInt ( tRes.m_iTotalQueries );
tOut.SendInt ( tRes.m_iOnlyTerms );
tOut.SendInt ( tRes.m_iEarlyOutQueries );
tOut.SendInt ( tRes.m_dQueryDT.GetLength () );
for ( int iDT : tRes.m_dQueryDT )
tOut.SendInt ( iDT );
tOut.SendString ( tRes.m_sMessages.sWarning () );
}
static void SendMysqlPercolateReply ( RowBuffer_i & tOut, const CPqResult & tResult, int iShift=0 )
{
// shortcuts
const PercolateMatchResult_t &tRes = tResult.m_dResult;
const CSphFixedVector<DocID_t> &dDocids = tResult.m_dDocids;
bool bDumpDocs = tRes.m_bGetDocs;
bool bQuery = tRes.m_bGetQuery;
// result set header packet. We will attach EOF manually at the end.
tOut.HeadBegin ();
tOut.HeadColumn ( "id", MYSQL_COL_LONGLONG );
if ( bDumpDocs )
tOut.HeadColumn ( "documents" );
if ( bQuery )
{
tOut.HeadColumn ( "query" );
tOut.HeadColumn ( "tags" );
tOut.HeadColumn ( "filters" );
}
// EOF packet is sent explicitly due to non-default params.
auto iWarns = tRes.m_sMessages.WarnEmpty () ? 0 : 1;
tOut.HeadEnd ( false, iWarns );
CSphVector<int64_t> dTmpDocs;
int iDocOff = -1;
StringBuilder_c sDocs;
for ( const auto &tDesc : tRes.m_dQueryDesc )
{
tOut.PutNumAsString ( tDesc.m_iQUID );
if ( bDumpDocs )
{
sDocs.StartBlock ( "," );
// document count + document id(s)
auto iCount = ( int ) ( tRes.m_dDocs[++iDocOff] );
if ( dDocids.GetLength () ) // need de-duplicate docs
{
dTmpDocs.Resize ( iCount );
for ( int iDoc = 0; iDoc<iCount; ++iDoc )
{
RowID_t tRow = tRes.m_dDocs[++iDocOff];
dTmpDocs[iDoc] = dDocids[tRow];
}
dTmpDocs.Uniq ();
for ( auto dTmpDoc : dTmpDocs )
sDocs.Sprintf ( "%l", dTmpDoc );
} else
{
for ( int iDoc = 0; iDoc<iCount; ++iDoc )
{
RowID_t tRow = tRes.m_dDocs[++iDocOff];
sDocs.Sprintf ( "%u", tRow + iShift );
}
}
tOut.PutString ( sDocs );
sDocs.Clear ();
}
if ( bQuery )
{
tOut.PutString ( tDesc.m_sQuery );
tOut.PutString ( tDesc.m_sTags );
tOut.PutString ( tDesc.m_sFilters );
}
if ( !tOut.Commit() )
return;
}
tOut.Eof ( false, iWarns );
}
// process one(!) local(!) pq index
static void PQLocalMatch ( const BlobVec_t & dDocs, const CSphString & sIndex, const PercolateOptions_t & tOpt, CSphSessionAccum & tAcc, CPqResult & tResult, int iStart, int iDocs )
{
CSphString sWarning, sError;
auto &sMsg = tResult.m_dResult.m_sMessages;
tResult.m_dResult.m_bGetDocs = tOpt.m_bGetDocs;
tResult.m_dResult.m_bVerbose = tOpt.m_bVerbose;
tResult.m_dResult.m_bGetQuery = tOpt.m_bGetQuery;
sMsg.Clear ();
if ( !iDocs || ( iStart + iDocs )>dDocs.GetLength () )
iDocs = dDocs.GetLength () - iStart;
if ( !iDocs )
return sMsg.Warn ( "No more docs for sparse matching" );
auto pServed = GetServed ( sIndex );
if ( !pServed )
return sMsg.Err ( "unknown local table '%s' in search request", sIndex.cstr () );
if ( pServed->m_eType!=IndexType_e::PERCOLATE )
return sMsg.Err ( "table '%s' is not percolate", sIndex.cstr () );
RIdx_T<PercolateIndex_i*> pIndex { pServed };
RtAccum_t * pAccum = tAcc.GetAcc ( pIndex, sError );
sMsg.Err ( sError );
if ( !sMsg.ErrEmpty () )
return;
const CSphSchema & tSchema = pIndex->GetInternalSchema();
int iFieldsCount = tSchema.GetFieldsCount();
InsertDocData_c tDoc(tSchema);
// set defaults
int iAttrsCount = tSchema.GetAttrsCount ();
for ( int i = 0; i<iAttrsCount; ++i )
{
const CSphColumnInfo & tCol = tSchema.GetAttr(i);
CSphAttrLocator tLoc = tCol.m_tLocator;
tLoc.m_bDynamic = true;
CSphMatchVariant::SetDefaultAttr ( tDoc.m_tDoc, tLoc, tCol.m_eAttrType );
}
int iStrCounter = 0;
int iMvaCounter = 0;
SchemaItemHash_c hSchemaLocators;
if ( tOpt.m_bJsonDocs )
{
// hash attrs
for ( int i = 0; i<iAttrsCount; ++i )
{
const CSphColumnInfo &tCol = tSchema.GetAttr ( i );
SchemaItemVariant_t tAttr;
tAttr.m_tLoc = tCol.m_tLocator;
tAttr.m_tLoc.m_bDynamic = true; /// was just set above
tAttr.m_eType = tCol.m_eAttrType;
if ( tCol.m_eAttrType==SPH_ATTR_STRING || tCol.m_eAttrType==SPH_ATTR_JSON )
tAttr.m_iStr = iStrCounter++;
if ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET )
tAttr.m_iMva = iMvaCounter++;
hSchemaLocators.Add ( sphFNV64 ( tCol.m_sName.cstr () ), tAttr );
}
for ( int i = 0; i<iFieldsCount; ++i )
{
const CSphColumnInfo &tField = tSchema.GetField ( i );
SchemaItemVariant_t tAttr;
tAttr.m_iField = i;
hSchemaLocators.Add ( sphFNV64 ( tField.m_sName.cstr () ), tAttr );
}
} else
{
// even without JSON docs, MVAs should match the schema definition when inserting data into the accumulator
for ( int i = 0; i<iAttrsCount; ++i )
{
const CSphColumnInfo &tCol = tSchema.GetAttr ( i );
if ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET )
++iMvaCounter;
}
}
const CSphColumnInfo * pId = tSchema.GetAttr ( sphGetDocidName () );
assert( pId );
CSphAttrLocator tIdLoc = pId->m_tLocator;
tIdLoc.m_bDynamic = true;
int iDocsNoIdCount = 0;
bool bAutoId = tOpt.m_sIdAlias.IsEmpty ();
tResult.m_dDocids.Reset ( bAutoId ? 0 : iDocs + 1 );
int64_t uSeqDocid = 1;
tDoc.m_dStrings.Resize(iStrCounter);
StringPtrTraits_t tStrings;
tStrings.m_dOff.Reset ( iStrCounter );
CSphVector<int64_t> dMvaParsed ( iMvaCounter );
CSphString sTokenFilterOpts;
RowID_t tRowID = 0;
for ( auto iDoc = iStart; iDoc<iStart+iDocs; ++iDoc )
{
// doc-id
tDoc.m_tDoc.SetAttr ( tIdLoc, 0 );
tDoc.m_dFields[0] = dDocs[iDoc];
dMvaParsed.Resize ( iMvaCounter );
dMvaParsed.Fill ( 0 );
if ( tOpt.m_bJsonDocs )
{
// reset all back to defaults
tDoc.m_dFields.Fill ( { nullptr, 0 } );
for ( int i = 0; i<iAttrsCount; ++i )
{
const CSphColumnInfo &tCol = tSchema.GetAttr ( i );
CSphAttrLocator tLoc = tCol.m_tLocator;
tLoc.m_bDynamic = true;
CSphMatchVariant::SetDefaultAttr ( tDoc.m_tDoc, tLoc, tCol.m_eAttrType );
}
tStrings.Reset();
if ( !ParseBsonDocument ( dDocs[iDoc], hSchemaLocators, tOpt.m_sIdAlias, iDoc, tDoc.m_dFields, tDoc.m_tDoc, tStrings, dMvaParsed, tSchema, sMsg ) )
{
// for now the only failure case is when the provided bson is an empty (null) document.
if ( tOpt.m_bSkipBadJson )
{
sMsg.Warn ( "ERROR: Document %d is empty", iDoc + tOpt.m_iShift + 1 );
continue;
}
sMsg.Err ( "Document %d is empty", iDoc + tOpt.m_iShift + 1 );
break;
}
tStrings.SavePointersTo ( tDoc.m_dStrings, false );
// convert offsets into the tStrings buffer back into pointers
for ( VecTraits_T<const char> & tField : tDoc.m_dFields )
{
if ( !tField.GetLength() )
continue;
int64_t iOff = int64_t( tField.Begin() );
int iLen = tField.GetLength();
tField = VecTraits_T<const char> ( (const char *)( tStrings.m_dPackedData.Begin()+iOff ), iLen );
}
}
tDoc.FixParsedMVAs ( dMvaParsed, iMvaCounter );
if ( !sMsg.ErrEmpty () )
break;
tDoc.m_tDoc.m_tRowID = ( RowID_t ) tRowID++;
if ( !bAutoId )
{
// in user-provides-id mode let's skip all docs without id
if ( !sphGetDocID ( tDoc.m_tDoc.m_pDynamic ) )
{
++iDocsNoIdCount;
continue;
}
// store provided doc-id for result set sending
tResult.m_dDocids[uSeqDocid] = ( int64_t ) sphGetDocID ( tDoc.m_tDoc.m_pDynamic );
tDoc.m_tDoc.SetAttr ( tIdLoc, uSeqDocid++ );
} else
tDoc.m_tDoc.SetAttr ( tIdLoc, iDoc + 1 ); // +1 since docid is 1-based
// PQ works with sequential document numbers; element 0 is unused
// add document
pIndex->AddDocument ( tDoc, true, sTokenFilterOpts, sError, sWarning, pAccum );
sMsg.Err ( sError );
sMsg.Warn ( sWarning );
if ( !sMsg.ErrEmpty() )
break;
}
// fire exit
if ( !sMsg.ErrEmpty() )
{
pIndex->RollBack ( pAccum ); // clean up collected data
return;
}
pIndex->MatchDocuments ( pAccum, tResult.m_dResult );
if ( iDocsNoIdCount )
sMsg.Warn ( "skipped %d document(s) without id field '%s'", iDocsNoIdCount, tOpt.m_sIdAlias.cstr() );
}
void PercolateMatchDocuments ( const BlobVec_t & dDocs, const PercolateOptions_t & tOpts, CSphSessionAccum & tAcc, CPqResult & tResult )
{
CSphString sIndex = tOpts.m_sIndex;
CSphString sWarning, sError;
StrVec_t dLocalIndexes;
const auto * pLocalIndexes = &dLocalIndexes;
VecRefPtrsAgentConn_t dAgents;
auto pDist = GetDistr ( sIndex );
if ( pDist )
{
for ( const auto& pAgent : pDist->m_dAgents )
{
auto * pConn = new AgentConn_t;
pConn->SetMultiAgent ( pAgent );
pConn->m_iMyConnectTimeoutMs = pDist->GetAgentConnectTimeoutMs();
pConn->m_iMyQueryTimeoutMs = pDist->GetAgentQueryTimeoutMs();
dAgents.Add ( pConn );
}
pLocalIndexes = &pDist->m_dLocal;
} else
dLocalIndexes.Add ( sIndex );
// at this point we know total num of involved indexes,
// and can eventually split (sparse) docs among them.
int iChunks = 0;
if ( tOpts.m_eMode==PercolateOptions_t::unknown || tOpts.m_eMode==PercolateOptions_t::sparsed)
iChunks = dAgents.GetLength () + pLocalIndexes->GetLength ();
int iStart = 0;
int iStep = iChunks>1 ? ( ( dDocs.GetLength () - 1 ) / iChunks + 1 ) : 0;
bool bHaveRemotes = !dAgents.IsEmpty ();
int iSuccesses = 0;
int iAgentsDone = 0;
std::unique_ptr<PqRequestBuilder_c> pReqBuilder;
std::unique_ptr<ReplyParser_i> pParser;
CSphRefcountedPtr<RemoteAgentsObserver_i> pReporter { nullptr };
if ( bHaveRemotes )
{
pReqBuilder = std::make_unique<PqRequestBuilder_c> ( dDocs, tOpts, iStart, iStep );
iStart += iStep * dAgents.GetLength ();
pParser = std::make_unique<PqReplyParser_c>();
pReporter = GetObserver();
ScheduleDistrJobs ( dAgents, pReqBuilder.get(), pParser.get(), pReporter );
}
LazyVector_T <CPqResult> dLocalResults;
for ( const auto & sPqIndex : *pLocalIndexes )
{
auto & dResult = dLocalResults.Add();
PQLocalMatch ( dDocs, sPqIndex, tOpts, tAcc, dResult, iStart, iStep );
iStart += iStep;
}
if ( bHaveRemotes )
{
assert ( pReporter );
pReporter->Finish ();
iSuccesses = ( int ) pReporter->GetSucceeded ();
iAgentsDone = ( int ) pReporter->GetFinished ();
}
LazyVector_T<CPqResult*> dAllResults;
for ( auto & dLocalRes : dLocalResults )
dAllResults.Add ( &dLocalRes );
CPqResult dMsgs; // fake resultset just to grab errors from remotes
if ( iAgentsDone>iSuccesses )
dAllResults.Add ( &dMsgs );
if ( iAgentsDone )
{
for ( auto * pAgent : dAgents )
{
if ( !pAgent->m_bSuccess )
{
dMsgs.m_dResult.m_sMessages.Err ( pAgent->m_sFailure );
continue;
}
auto pResult = ( CPqResult * ) pAgent->m_pResult.get ();
if ( !pResult )
continue;
dAllResults.Add ( pResult );
}
}
MergePqResults ( dAllResults, tResult, iChunks<2 );
if ( iSuccesses!=iAgentsDone )
{
sphWarning ( "Remote PQ: some of the agents didn't answered: %d queried, %d finished, %d succeeded"
, dAgents.GetLength (), iAgentsDone, iSuccesses );
}
}
/// call PQ command over API
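/// request layout: DWORD flags (1=get docs, 2=get query, 4=json docs, 8=verbose, 16=skip bad json),
/// then docs_id alias, table name, shift, document count and the documents themselves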
void HandleCommandCallPq ( ISphOutputBuffer &tOut, WORD uVer, InputBuffer_c &tReq ) REQUIRES ( HandlerThread )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_CALLPQ, tOut ) )
return;
// options
PercolateOptions_t tOpts;
DWORD uFlags = tReq.GetDword ();
tOpts.m_bGetDocs = !!(uFlags & 1);
tOpts.m_bGetQuery = !!(uFlags & 2);
tOpts.m_bJsonDocs = !!(uFlags & 4);
tOpts.m_bVerbose = !!(uFlags & 8);
tOpts.m_bSkipBadJson = !! ( uFlags & 16 );
tOpts.m_sIdAlias = tReq.GetString();
// index name
tOpts.m_sIndex = tReq.GetString();
// document(s)
tOpts.m_iShift = tReq.GetInt();
BlobVec_t dDocs ( tReq.GetInt() );
for ( auto & sDoc : dDocs )
if ( !tReq.GetString ( sDoc ) )
{
SendErrorReply ( tOut, "Can't retrieve doc from input buffer" );
return;
}
// working
CSphSessionAccum tAcc;
CPqResult tResult;
PercolateMatchDocuments ( dDocs, tOpts, tAcc, tResult );
if ( tResult.m_dResult.m_iQueriesFailed )
tResult.m_dResult.m_sMessages.Err ( "%d queries failed", tResult.m_dResult.m_iQueriesFailed );
if ( !tResult.m_dResult.m_sMessages.ErrEmpty () )
{
SendErrorReply ( tOut, "%s", tResult.m_dResult.m_sMessages.sError() );
return;
}
SendAPIPercolateReply ( tOut, tResult, tOpts.m_iShift );
}
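/// CALL PQ over SphinxQL; e.g. CALL PQ ('pq_table', ('{"title":"hello"}'), 1 AS docs, 1 AS docs_json)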
static void HandleMysqlCallPQ ( RowBuffer_i & tOut, SqlStmt_t & tStmt, CSphSessionAccum & tAcc, CPqResult & tResult )
{
StatCountCommand ( SEARCHD_COMMAND_CALLPQ );
PercolateMatchResult_t &tRes = tResult.m_dResult;
tRes.Reset();
// check arguments
// index name, document | documents list, [named opts]
if ( tStmt.m_dInsertValues.GetLength()!=2 )
{
tOut.Error ( "PQ() expects exactly 2 arguments (table, document(s))" );
return;
}
auto &dStmtIndex = tStmt.m_dInsertValues[0];
auto &dStmtDocs = tStmt.m_dInsertValues[1];
if ( dStmtIndex.m_iType!=SqlInsert_t::QUOTED_STRING )
{
tOut.Error ( "PQ() argument 1 must be a string" );
return;
}
if ( dStmtDocs.m_iType!=SqlInsert_t::QUOTED_STRING && dStmtDocs.m_iType!=SqlInsert_t::CONST_STRINGS )
{
tOut.Error ( "PQ() argument 2 must be a string or a string list" );
return;
}
// document(s)
StrVec_t dDocs;
if ( dStmtDocs.m_iType==SqlInsert_t::QUOTED_STRING )
dDocs.Add ( dStmtDocs.m_sVal );
else
dDocs.SwapData ( tStmt.m_dCallStrings );
// options last
CSphString sError;
PercolateOptions_t tOpts;
tOpts.m_sIndex = dStmtIndex.m_sVal;
SqlParser_SplitClusterIndex ( tOpts.m_sIndex, nullptr );
bool bSkipEmpty = false;
ARRAY_FOREACH ( i, tStmt.m_dCallOptNames )
{
CSphString & sOpt = tStmt.m_dCallOptNames[i];
const SqlInsert_t & v = tStmt.m_dCallOptValues[i];
sOpt.ToLower();
int iExpType = SqlInsert_t::CONST_INT;
if ( sOpt=="docs_id" )
{
tOpts.m_sIdAlias = v.m_sVal;
iExpType = SqlInsert_t::QUOTED_STRING;
sphColumnToLowercase ( const_cast<char *>( tOpts.m_sIdAlias.cstr() ) );
} else if ( sOpt=="docs" ) tOpts.m_bGetDocs = ( v.GetValueInt()!=0 );
else if ( sOpt=="verbose" ) tOpts.m_bVerbose = ( v.GetValueInt()!=0 );
else if ( sOpt=="docs_json" ) tOpts.m_bJsonDocs = ( v.GetValueInt()!=0 );
else if ( sOpt=="query" ) tOpts.m_bGetQuery = ( v.GetValueInt()!=0 );
else if ( sOpt=="skip_bad_json" ) tOpts.m_bSkipBadJson = ( v.GetValueInt()!=0 );
else if ( sOpt=="skip_empty" ) bSkipEmpty = true;
else if ( sOpt=="shift" ) tOpts.m_iShift = v.GetValueInt();
else if ( sOpt=="mode" )
{
auto sMode = v.m_sVal;
iExpType = SqlInsert_t::QUOTED_STRING;
sMode.ToLower();
if ( sMode=="sparsed" )
tOpts.m_eMode = PercolateOptions_t::sparsed;
else if ( sMode=="sharded" )
tOpts.m_eMode = PercolateOptions_t::sharded;
else
{
sError.SetSprintf ( "unknown mode %s. (Expected 'sparsed' or 'sharded')", v.m_sVal.cstr () );
break;
}
} else
{
sError.SetSprintf ( "unknown option %s", sOpt.cstr() );
break;
}
// post-conf type check
if ( iExpType!=v.m_iType )
{
sError.SetSprintf ( "unexpected option %s type", sOpt.cstr() );
break;
}
}
if ( tOpts.m_bSkipBadJson && !tOpts.m_bJsonDocs ) // fixme! is this warning necessary?
tRes.m_sMessages.Warn ( "option to skip bad json makes no sense since docs are not in json form" );
if ( !sError.IsEmpty() )
{
tOut.Error ( sError.cstr() );
return;
}
BlobVec_t dBlobDocs;
dBlobDocs.Reserve ( dDocs.GetLength() ); // actually some docs may be complex
CSphVector<int> dBadDocs;
if ( !tOpts.m_bJsonDocs )
for ( auto &dDoc : dDocs )
dDoc.LeakToVec ( dBlobDocs.Add () );
else
ARRAY_FOREACH ( i, dDocs )
{
using namespace bson;
CSphVector<BYTE> dData;
if ( !sphJsonParse ( dData, (char *)dDocs[i].cstr(), g_bJsonAutoconvNumbers, g_bJsonKeynamesToLowercase, false, sError ) )
{
dBadDocs.Add ( i + 1 );
continue;
}
Bson_c dBson ( dData );
if ( dBson.IsArray () )
{
for ( BsonIterator_c dItem ( dBson ); dItem; dItem.Next() )
{
if ( dItem.IsAssoc () )
dItem.BsonToBson ( dBlobDocs.Add () );
else
{
dBadDocs.Add ( i + 1 ); // fixme! maybe report it as 'wrong doc N in string M'?
break;
}
}
}
else if ( dBson.IsAssoc() )
{
dData.SwapData ( dBlobDocs.Add () );
}
else if ( bSkipEmpty && dBson.IsEmpty() )
continue;
else
dBadDocs.Add ( i + 1 ); // let it be just 'an error' for now
if ( !dBadDocs.IsEmpty() && !tOpts.m_bSkipBadJson )
break;
}
if ( !dBadDocs.IsEmpty() )
{
StringBuilder_c sBad ( ",", "Bad JSON objects in strings: " );
for ( int iBadDoc:dBadDocs )
sBad.Sprintf ( "%d", iBadDoc );
if ( !tOpts.m_bSkipBadJson )
{
tOut.Error ( sBad.cstr ());
return;
}
tRes.m_sMessages.Warn ( sBad.cstr () );
}
tResult.m_dDocids.Reset ( tOpts.m_sIdAlias.IsEmpty () ? 0 : dBlobDocs.GetLength () + 1 );
if ( tOpts.m_iShift && !tOpts.m_sIdAlias.IsEmpty () )
tRes.m_sMessages.Warn ( "'shift' option works only for automatic ids, when 'docs_id' is not defined" );
PercolateMatchDocuments ( dBlobDocs, tOpts, tAcc, tResult );
if ( !tRes.m_sMessages.ErrEmpty () )
{
tRes.m_sMessages.MoveAllTo ( sError );
tOut.Error ( sError.cstr () );
return;
}
SendMysqlPercolateReply ( tOut, tResult, tOpts.m_iShift );
}
void HandleMysqlPercolateMeta ( const CPqResult &tResult, const CSphString & sWarning, RowBuffer_i & tOut )
{
// shortcuts
const PercolateMatchResult_t &tMeta = tResult.m_dResult;
tOut.HeadTuplet ( "Name", "Value" );
tOut.DataTupletf ( "Total", "%.3D sec", tMeta.m_tmTotal / 1000 );
if ( tMeta.m_tmSetup && tMeta.m_tmSetup>0 )
tOut.DataTupletf ( "Setup", "%.3D sec", tMeta.m_tmSetup / 1000 );
tOut.DataTuplet ( "Queries matched", tMeta.m_iQueriesMatched );
tOut.DataTuplet ( "Queries failed", tMeta.m_iQueriesFailed );
tOut.DataTuplet ( "Document matched", tMeta.m_iDocsMatched );
tOut.DataTuplet ( "Total queries stored", tMeta.m_iTotalQueries );
tOut.DataTuplet ( "Term only queries", tMeta.m_iOnlyTerms );
tOut.DataTuplet ( "Fast rejected queries", tMeta.m_iEarlyOutQueries );
if ( !tMeta.m_dQueryDT.IsEmpty() )
{
uint64_t tmMatched = 0;
StringBuilder_c sList (", ");
assert ( tMeta.m_iQueriesMatched==tMeta.m_dQueryDT.GetLength() );
for ( int tmQuery : tMeta.m_dQueryDT )
{
sList.Sprintf ( "%d", tmQuery );
tmMatched += tmQuery;
}
tOut.DataTuplet ( "Time per query", sList.cstr() );
tOut.DataTuplet ( "Time of matched queries", tmMatched );
}
if ( !sWarning.IsEmpty() )
tOut.DataTuplet ( "Warning", sWarning.cstr() );
tOut.Eof();
}
static bool IsHttpStmt ( const SqlStmt_t & tStmt )
{
return !tStmt.m_sEndpoint.IsEmpty();
}
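/// default column mapping when INSERT carries no explicit column list: docid at 0, fields next (1-based),
/// then non-internal attrs; a string attr sharing its name with a field maps into that field's slot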
static void PopulateMapsFromIndexSchema ( CSphVector<int> & dAttrSchema, CSphVector<int> & dFieldSchema, const CSphSchema & tSchema )
{
assert ( tSchema.GetAttr(0).m_sName==sphGetDocidName() );
ARRAY_FOREACH ( i, dFieldSchema )
dFieldSchema[i] = i+1;
dAttrSchema[0]=0;
int iAttrId = dFieldSchema.GetLength()+1;
for ( int i = 1; i < dAttrSchema.GetLength(); i++ )
{
if ( sphIsInternalAttr ( tSchema.GetAttr(i) ) )
dAttrSchema[i]=-1;
else
{
// check for string field/attr with the same name
int iFieldId = tSchema.GetFieldIndex ( tSchema.GetAttr(i).m_sName.cstr() );
if ( iFieldId!=-1 )
dAttrSchema[i] = iFieldId+1;
else
dAttrSchema[i] = iAttrId++;
}
}
}
static bool CreateAttrMaps ( CSphVector<int> & dAttrSchema, CSphVector<int> & dFieldSchema, CSphVector<bool> & dFieldAttrs, const CSphSchema & tSchema, const StrVec_t & dStmtInsertSchema, StmtErrorReporter_i & tOut )
{
ARRAY_FOREACH ( i, dFieldAttrs )
dFieldAttrs[i] = false;
if ( !dStmtInsertSchema.GetLength() )
{
PopulateMapsFromIndexSchema ( dAttrSchema, dFieldSchema, tSchema );
return true;
}
// got a list of columns; check for 1) existence, 2) dupes
StrVec_t dCheck = dStmtInsertSchema;
ARRAY_FOREACH ( i, dCheck )
// OPTIMIZE! GetFieldIndex uses linear search. Maybe use a hash instead?
if ( tSchema.GetAttrIndex ( dCheck[i].cstr() )==-1 && tSchema.GetFieldIndex ( dCheck[i].cstr() )==-1 )
{
tOut.Error ( "unknown column: '%s'", dCheck[i].cstr() );
return false;
}
dCheck.Sort();
for ( int i=1; i<dCheck.GetLength(); i++ )
if ( dCheck[i-1]==dCheck[i] )
{
CSphString sError;
sError.SetSprintf ( "column '%s' specified twice", dCheck[i].cstr() );
tOut.ErrorEx ( EMYSQL_ERR::FIELD_SPECIFIED_TWICE, sError.cstr() );
return false;
}
// hash column list
// OPTIMIZE! hash index columns once (!) instead
SmallStringHash_T<int> dInsertSchema;
ARRAY_FOREACH ( i, dStmtInsertSchema )
dInsertSchema.Add ( i, dStmtInsertSchema[i] );
// map fields
ARRAY_FOREACH ( i, dFieldSchema )
{
const char * szFieldName = tSchema.GetFieldName(i);
if ( dInsertSchema.Exists(szFieldName) )
{
dFieldSchema[i] = dInsertSchema[szFieldName];
// does an attribute with the same name exist?
if ( tSchema.GetAttr(szFieldName) )
dFieldAttrs[i] = true;
} else
dFieldSchema[i] = -1;
}
// map attrs
ARRAY_FOREACH ( j, dAttrSchema )
{
const char * szAttrName = tSchema.GetAttr(j).m_sName.cstr();
if ( dInsertSchema.Exists(szAttrName) )
dAttrSchema[j] = dInsertSchema[szAttrName];
else
dAttrSchema[j] = -1;
}
return true;
}
/////////////////////////////////////////////////////////////////////
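/// converts SQL INSERT values into an InsertDocData_c row: plain and columnar attrs, strings, JSON, MVAs and full-text fields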
class AttributeConverter_c : public InsertDocData_c
{
public:
AttributeConverter_c ( const CSphSchema & tSchema, const CSphVector<bool> & dFieldAttrs, CSphString & sError, CSphString & sWarning );
bool SetAttrValue ( int iCol, const SqlInsert_t & tVal, int iRow, int iQuerySchemaIdx, CSphString & sError );
void SetDefaultAttrValue ( int iCol );
bool SetFieldValue ( int iField, const SqlInsert_t & tVal, int iRow, int iQuerySchemaIdx );
void SetDefaultFieldValue ( int iField );
void NewRow();
void Finalize();
private:
const CSphSchema & m_tSchema;
const CSphColumnInfo * m_pDocId = nullptr;
const CSphVector<bool> & m_dFieldAttrs;
StringPtrTraits_t m_tStrings;
StrVec_t m_dTmpFieldStorage;
CSphVector<int> m_dColumnarRemap;
CSphString & m_sError;
CSphString & m_sWarning;
bool String2JsonPack ( char * pStr, CSphVector<BYTE> & dBuf );
bool CheckStrings ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal, int iCol, int iRow );
bool CheckJson ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal );
bool CheckMVA ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal, int iCol, int iRow );
bool CheckInsertTypes ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal, int iRow, int iQuerySchemaIdx );
};
AttributeConverter_c::AttributeConverter_c ( const CSphSchema & tSchema, const CSphVector<bool> & dFieldAttrs, CSphString & sError, CSphString & sWarning )
: InsertDocData_c ( tSchema )
, m_tSchema ( tSchema )
, m_pDocId ( tSchema.GetAttr ( sphGetDocidName() ) )
, m_dFieldAttrs ( dFieldAttrs )
, m_sError ( sError )
, m_sWarning ( sWarning )
{
int iAttrs = tSchema.GetAttrsCount();
m_dTmpFieldStorage.Resize ( tSchema.GetFieldsCount() );
m_dColumnarRemap.Resize(iAttrs);
int iColumnarAttr = 0;
for ( int i = 0; i < iAttrs; i++ )
if ( m_tSchema.GetAttr(i).IsColumnar() )
m_dColumnarRemap[i] = iColumnarAttr++;
else
m_dColumnarRemap[i] = -1;
m_dColumnarAttrs.Resize(iColumnarAttr);
m_tStrings.m_dOff.Reset(iAttrs);
}
bool AttributeConverter_c::String2JsonPack ( char * pStr, CSphVector<BYTE> & dBuf )
{
dBuf.Resize ( 0 ); // buffer for JSON parser must be empty to properly set JSON_ROOT data
if ( !pStr )
return true;
if ( !sphJsonParse ( dBuf, pStr, g_bJsonAutoconvNumbers, g_bJsonKeynamesToLowercase, true, m_sError ) )
{
if ( g_bJsonStrict )
return false;
if ( m_sWarning.IsEmpty() )
m_sWarning = m_sError;
else
m_sWarning.SetSprintf ( "%s; %s", m_sWarning.cstr(), m_sError.cstr() );
m_sError = "";
}
return true;
}
bool AttributeConverter_c::CheckStrings ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal, int iCol, int iRow )
{
if ( tCol.m_eAttrType!=SPH_ATTR_STRING && tCol.m_eAttrType!=SPH_ATTR_STRINGPTR )
return true;
if ( tVal.m_sVal.Length() > 0x3FFFFF )
{
*( char * ) ( tVal.m_sVal.cstr () + 0x3FFFFF ) = '\0';
m_sWarning.SetSprintf ( "String column %d at row %d too long, truncated to 4MB", iCol, iRow );
}
m_dStrings.Add ( tVal.m_sVal.cstr() );
return true;
}
bool AttributeConverter_c::CheckJson ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal )
{
if ( tCol.m_eAttrType!=SPH_ATTR_JSON )
return true;
int iStrCount = m_dStrings.GetLength();
m_dStrings.Add ( nullptr );
// empty source string means NULL attribute
if ( tVal.m_sVal.IsEmpty() )
return true;
// input to sphJsonParse must be terminated with a double zero; a usual CSphString has a SAFETY_GAP of 4 zeros anyway
if ( !String2JsonPack ( (char *)tVal.m_sVal.cstr(), m_tStrings.m_dParserBuf ) )
return false;
int iParsedLength = m_tStrings.m_dParserBuf.GetLength();
if ( iParsedLength )
{
m_tStrings.m_dOff[iStrCount] = m_tStrings.m_dPackedData.GetLength();
BYTE * pPacked = m_tStrings.m_dPackedData.AddN ( sphCalcPackedLength ( iParsedLength ) );
sphPackPtrAttr ( pPacked, m_tStrings.m_dParserBuf );
}
return true;
}
bool AttributeConverter_c::CheckMVA ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal, int iCol, int iRow )
{
if ( tCol.m_eAttrType!=SPH_ATTR_UINT32SET && tCol.m_eAttrType!=SPH_ATTR_INT64SET && tCol.m_eAttrType!=SPH_ATTR_FLOAT_VECTOR )
return true;
if ( !tVal.m_pVals )
{
AddMVALength(0);
return true;
}
auto & tAddVals = *tVal.m_pVals;
if ( tCol.m_eAttrType==SPH_ATTR_FLOAT_VECTOR )
{
AddMVALength ( tAddVals.GetLength() );
for ( const auto & i : tAddVals )
AddMVAValue ( sphF2DW ( i.m_fValue ) );
return true;
}
// collect data from scattered insvals
// FIXME! maybe remove this mess, and just have a single m_dMvas pool in parser instead?
bool bFloatInMVA = false;
for ( const auto & i : tAddVals )
bFloatInMVA |= i.m_bFloat;
if ( bFloatInMVA )
m_sWarning.SetSprintf ( "MVA attribute %d at row %d: inserting float value", iCol, iRow );
tAddVals.Uniq();
AddMVALength ( tAddVals.GetLength() );
for ( const auto & i : tAddVals )
AddMVAValue ( i.m_iValue );
return true;
}
bool AttributeConverter_c::CheckInsertTypes ( const CSphColumnInfo & tCol, const SqlInsert_t & tVal, int iRow, int iQuerySchemaIdx )
{
// NULL fits any column: for now it just sets the default value for the type
if ( tVal.m_iType==SqlInsert_t::TOK_NULL )
return true;
if ( tVal.m_iType!=SqlInsert_t::QUOTED_STRING
&& tVal.m_iType!=SqlInsert_t::CONST_INT
&& tVal.m_iType!=SqlInsert_t::CONST_FLOAT
&& tVal.m_iType!=SqlInsert_t::CONST_MVA )
{
m_sError.SetSprintf ( "row %d, column %d: internal error: unknown insval type %d", 1+iRow, 1+iQuerySchemaIdx, tVal.m_iType ); // 1 for human base
return false;
}
if ( tVal.m_iType==SqlInsert_t::CONST_MVA && !( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET || tCol.m_eAttrType==SPH_ATTR_JSON || tCol.m_eAttrType==SPH_ATTR_FLOAT_VECTOR ) )
{
m_sError.SetSprintf ( "row %d, column %d: MVA value specified for a non-MVA column", 1+iRow, 1+iQuerySchemaIdx ); // 1 for human base
return false;
}
if ( ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET ) && tVal.m_iType!=SqlInsert_t::CONST_MVA )
{
m_sError.SetSprintf ( "row %d, column %d: non-MVA value specified for a MVA column", 1+iRow, 1+iQuerySchemaIdx ); // 1 for human base
return false;
}
return true;
}
void AttributeConverter_c::SetDefaultAttrValue ( int iCol )
{
const CSphColumnInfo & tCol = m_tSchema.GetAttr(iCol);
CSphAttrLocator tLoc = tCol.m_tLocator;
tLoc.m_bDynamic = true;
if ( tCol.m_eAttrType==SPH_ATTR_STRING || tCol.m_eAttrType==SPH_ATTR_STRINGPTR || tCol.m_eAttrType==SPH_ATTR_JSON )
m_dStrings.Add(nullptr);
if ( tCol.m_eAttrType==SPH_ATTR_UINT32SET || tCol.m_eAttrType==SPH_ATTR_INT64SET || tCol.m_eAttrType==SPH_ATTR_FLOAT_VECTOR )
AddMVALength ( 0, true );
SqlInsert_t tDefaultVal;
tDefaultVal.m_iType = SqlInsert_t::CONST_INT;
tDefaultVal.SetValueInt(0);
SphAttr_t tAttr;
CSphString sError;
if ( CSphMatchVariant::ConvertPlainAttr ( tDefaultVal, tCol.m_eAttrType, &tCol.m_sName, tAttr, false, sError ) )
{
if ( tCol.IsColumnar() )
m_dColumnarAttrs [ m_dColumnarRemap[iCol] ] = tAttr;
else
m_tDoc.SetAttr ( tLoc, tAttr );
}
}
bool AttributeConverter_c::SetAttrValue ( int iCol, const SqlInsert_t & tVal, int iRow, int iQuerySchemaIdx, CSphString & sError )
{
const CSphColumnInfo & tCol = m_tSchema.GetAttr(iCol);
bool bDocId = tCol.m_sName == sphGetDocidName();
CSphAttrLocator tLoc = tCol.m_tLocator;
tLoc.m_bDynamic = true;
if ( !CheckInsertTypes ( tCol, tVal, iRow, iQuerySchemaIdx ) )
return false;
SphAttr_t tAttr;
if ( CSphMatchVariant::ConvertPlainAttr ( tVal, tCol.m_eAttrType, &tCol.m_sName, tAttr, bDocId, sError ) )
{
if ( tCol.IsColumnar() )
m_dColumnarAttrs [ m_dColumnarRemap[iCol] ] = tAttr;
else
m_tDoc.SetAttr ( tLoc, tAttr );
}
else
{
if ( !sError.IsEmpty() )
return false;
}
if ( !CheckStrings ( tCol, tVal, iCol, iRow ) ) return false;
if ( !CheckJson ( tCol, tVal ) ) return false;
if ( !CheckMVA ( tCol, tVal, iCol, iRow ) ) return false;
return true;
}
bool AttributeConverter_c::SetFieldValue ( int iField, const SqlInsert_t & tVal, int iRow, int iQuerySchemaIdx )
{
if ( tVal.m_iType!=SqlInsert_t::QUOTED_STRING && tVal.m_iType!=SqlInsert_t::TOK_NULL )
{
m_sError.SetSprintf ( "row %d, column %d: string expected", 1+iRow, 1+iQuerySchemaIdx ); // 1 for human base
return false;
}
const char * szFieldValue = tVal.m_sVal.scstr();
if ( m_dFieldAttrs[iField] )
{
m_dTmpFieldStorage[iField] = szFieldValue;
m_dFields[iField] = { m_dTmpFieldStorage[iField].cstr(), m_dTmpFieldStorage[iField].Length() };
} else
m_dFields[iField] = { szFieldValue, ( int64_t) strlen(szFieldValue) };
return true;
}
void AttributeConverter_c::SetDefaultFieldValue ( int iField )
{
m_dFields[iField] = { nullptr, 0 };
}
void AttributeConverter_c::NewRow()
{
m_dStrings.Resize(0);
m_tStrings.Reset();
ResetMVAs();
}
void AttributeConverter_c::Finalize()
{
// remap JSON to string pointers
m_tStrings.SavePointersTo ( m_dStrings );
}
/////////////////////////////////////////////////////////////////////
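/// store one percolate query; expects dStrings[0]=query, dStrings[1]=tags, dStrings[2]=filters,
/// and queues a PQUERY_ADD replication command on success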
static bool InsertToPQ ( const SqlStmt_t & tStmt, RtIndex_i * pIndex, RtAccum_t * pAccum, CSphVector<int64_t> & dIds, const CSphMatch & tDoc, const CSphAttrLocator & tIdLoc, const CSphVector<const char *> & dStrings,
const CSphSchema & tSchemaInt, bool bReplace, CSphString & sError )
{
CSphVector<CSphFilterSettings> dFilters;
CSphVector<FilterTreeItem_t> dFilterTree;
if ( !PercolateParseFilters ( dStrings[2], session::GetCollation(), tSchemaInt, dFilters, dFilterTree, sError ) )
return false;
PercolateQueryArgs_t tArgs ( dFilters, dFilterTree );
tArgs.m_sQuery = dStrings[0];
tArgs.m_sTags = dStrings[1];
tArgs.m_iQUID = tDoc.GetAttr(tIdLoc);
tArgs.m_bReplace = bReplace;
tArgs.m_bQL = true;
// add query
auto * pQIndex = (PercolateIndex_i *)pIndex;
auto pStored = pQIndex->CreateQuery ( tArgs, sError );
if ( pStored )
{
auto * pCmd = pAccum->AddCommand ( ReplCmd_e::PQUERY_ADD, tStmt.m_sIndex, tStmt.m_sCluster );
dIds.Add ( pStored->m_iQUID );
pCmd->m_pStored = std::move ( pStored );
}
return true;
}
static bool CleanupAcc ( bool bMissed, RtAccum_t * pAccum, CSphString & sError )
{
assert ( pAccum );
sError.SetSprintf ( "can not finish transaction, table %s '%s'", ( bMissed ? "missed" : "changed" ), pAccum->GetIndexName().cstr() );
pAccum->Cleanup();
return false;
}
static bool CheckAccIndex ( CSphSessionAccum & tSession, CSphString & sError )
{
RtAccum_t * pAccum = tSession.GetAcc();
assert ( pAccum );
auto pServed = GetServed ( pAccum->GetIndexName() );
if ( !pServed )
return CleanupAcc ( true, pAccum, sError );
if ( pAccum->GetIndexId()!=RIdx_T<RtIndex_i*>( pServed )->GetIndexId() )
return CleanupAcc ( false, pAccum, sError );
return true;
}
void sphHandleMysqlBegin ( StmtErrorReporter_i& tOut, Str_t sQuery )
{
auto* pSession = session::GetClientSession();
auto& tAcc = pSession->m_tAcc;
auto& sError = pSession->m_sError;
MEMORY ( MEM_SQL_BEGIN );
if ( tAcc.GetIndex() )
{
if ( !CheckAccIndex ( tAcc, sError ) )
return tOut.Error ( "%s", sError.cstr() );
if ( !HandleCmdReplicate ( *tAcc.GetAcc() ) )
{
TlsMsg::MoveError ( sError );
return tOut.Error ( "%s", sError.cstr() );
}
}
pSession->m_bInTransaction = true;
tOut.Ok ( 0 );
}
void sphHandleMysqlCommitRollback ( StmtErrorReporter_i& tOut, Str_t sQuery, bool bCommit )
{
auto* pSession = session::GetClientSession();
auto& tAcc = pSession->m_tAcc;
auto& sError = pSession->m_sError;
auto& tCrashQuery = GlobalCrashQueryGetRef();
TRACE_CONN ( "conn", "sphHandleMysqlCommitRollback" );
MEMORY ( MEM_SQL_COMMIT );
pSession->m_bInTransaction = false;
RtIndex_i* pIndex = tAcc.GetIndex();
int iDeleted = 0;
if ( pIndex )
{
RtAccum_t * pAccum = tAcc.GetAcc();
tCrashQuery.m_dIndex = FromStr ( pAccum->GetIndexName() );
if ( !CheckAccIndex ( tAcc, sError ) )
return tOut.Error ( "%s", sError.cstr() );
if ( bCommit )
{
StatCountCommand ( SEARCHD_COMMAND_COMMIT );
if ( !HandleCmdReplicateDelete ( *pAccum, iDeleted ) )
{
TlsMsg::MoveError(sError);
tOut.Error ( "%s", sError.cstr() );
return;
}
} else
{
pIndex->RollBack ( pAccum );
}
}
tOut.Ok ( iDeleted );
}
static bool AddDocument ( const SqlStmt_t & tStmt, cServedIndexRefPtr_c & pServed, StmtErrorReporter_i & tOut );
static void CommitAcc ( const SqlStmt_t & tStmt, cServedIndexRefPtr_c & pServed, StmtErrorReporter_i & tOut );
void sphHandleMysqlInsert ( StmtErrorReporter_i & tOut, const SqlStmt_t & tStmt )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto* pSession = session::GetClientSession();
pSession->FreezeLastMeta();
bool bReplace = ( tStmt.m_eStmt == STMT_REPLACE );
StatCountCommand ( bReplace ? SEARCHD_COMMAND_REPLACE : SEARCHD_COMMAND_INSERT );
MEMORY ( MEM_SQL_INSERT );
auto tmStart = sphMicroTimer ();
auto pServed = GetServed ( tStmt.m_sIndex );
if ( !ServedDesc_t::IsMutable ( pServed ) )
{
tOut.Error ( "table '%s' absent, or does not support INSERT", tStmt.m_sIndex.cstr ());
return;
}
GlobalCrashQueryGetRef().m_dIndex = FromStr ( tStmt.m_sIndex );
// runs with the index RLocked
if ( !AddDocument ( tStmt, pServed, tOut ) )
return;
// index lock after replication takes place
CommitAcc ( tStmt, pServed, tOut );
StatCountCommandDetails ( SearchdStats_t::eReplace, tStmt.m_iRowsAffected, tmStart );
}
// when the index name comes as `cluster:name`, everything lands in the name and should be split into cluster and index name
void MaybeFixupIndexNameFromMysqldump ( SqlStmt_t & tStmt )
{
if ( g_pLocalIndexes->Contains ( tStmt.m_sIndex ) )
return;
auto dParts = sphSplit ( tStmt.m_sIndex.cstr (), ":" );
if ( dParts.GetLength ()!=2 )
return;
tStmt.m_sCluster = dParts[0];
tStmt.m_sIndex = dParts[1];
}
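/// parse INSERT/REPLACE rows into the accumulator; reports the error and returns false (rolling back collected data) on the first failure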
static bool AddDocument ( const SqlStmt_t & tStmt, cServedIndexRefPtr_c & pServed, StmtErrorReporter_i & tOut )
{
auto * pSession = session::GetClientSession();
auto & tAcc = pSession->m_tAcc;
bool bReplace = ( tStmt.m_eStmt == STMT_REPLACE );
auto & dLastIds = pSession->m_dLastIds;
CSphString sError;
auto & sWarning = pSession->m_tLastMeta.m_sWarning;
bool bPq = ( pServed->m_eType==IndexType_e::PERCOLATE );
RIdx_T<RtIndex_i*> pIndex { pServed };
// get schema, check values count
const CSphSchema & tSchema = pIndex->GetMatchSchema ();
int iSchemaSz = tSchema.GetAttrsCount() + tSchema.GetFieldsCount();
if ( pIndex->GetSettings().m_bIndexFieldLens )
iSchemaSz -= tSchema.GetFieldsCount();
// check for 'string indexed attribute'
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
auto & tAttr = tSchema.GetAttr(i);
if ( tAttr.m_eAttrType==SPH_ATTR_STRING && tSchema.GetField ( tAttr.m_sName.cstr() ) )
iSchemaSz--;
}
if ( tSchema.GetAttr ( sphGetBlobLocatorName() ) )
iSchemaSz--;
int iExp = tStmt.m_iSchemaSz;
int iGot = tStmt.m_dInsertValues.GetLength();
if ( !tStmt.m_dInsertSchema.GetLength() && iSchemaSz!=tStmt.m_iSchemaSz )
{
tOut.Error ( "column count does not match schema (expected %d, got %d)", iSchemaSz, iGot );
return false;
}
if ( ( iGot % iExp )!=0 )
{
tOut.Error ( "column count does not match value count (expected %d, got %d)", iExp, iGot );
return false;
}
if ( !ValidateClusterStatement ( tStmt.m_sIndex, *pServed, tStmt.m_sCluster, IsHttpStmt ( tStmt ) ) )
TlsMsg::MoveError ( sError );
if ( !sError.IsEmpty() )
{
tOut.ErrorEx ( EMYSQL_ERR::PARSE_ERROR, sError.cstr() );
return false;
}
CSphVector<int> dAttrSchema ( tSchema.GetAttrsCount() );
CSphVector<int> dFieldSchema ( tSchema.GetFieldsCount() );
CSphVector<bool> dFieldAttrs ( tSchema.GetFieldsCount() );
if ( !CreateAttrMaps ( dAttrSchema, dFieldSchema, dFieldAttrs, tSchema, tStmt.m_dInsertSchema, tOut ) )
return false;
RtAccum_t * pAccum = tAcc.GetAcc ( pIndex, sError );
if ( !sError.IsEmpty() )
{
tOut.ErrorEx ( EMYSQL_ERR::PARSE_ERROR, sError.cstr() );
return false;
}
CSphVector<int64_t> dIds;
dIds.Reserve ( tStmt.m_iRowsAffected );
const CSphColumnInfo * pDocid = tSchema.GetAttr(sphGetDocidName());
assert ( pDocid );
CSphAttrLocator tIdLoc = pDocid->m_tLocator;
tIdLoc.m_bDynamic = true;
AttributeConverter_c tConverter ( tSchema, dFieldAttrs, sError, sWarning );
// convert attrs
for ( int iRow=0; iRow<tStmt.m_iRowsAffected; iRow++ )
{
assert ( sError.IsEmpty() );
tConverter.NewRow();
int iSchemaAttrCount = tSchema.GetAttrsCount();
if ( pIndex->GetSettings().m_bIndexFieldLens )
iSchemaAttrCount -= tSchema.GetFieldsCount();
bool bOk = true;
for ( int i=0; i<iSchemaAttrCount && bOk; i++ )
{
int iQuerySchemaIdx = dAttrSchema[i];
if ( iQuerySchemaIdx < 0 )
tConverter.SetDefaultAttrValue(i);
else
bOk = tConverter.SetAttrValue ( i, tStmt.m_dInsertValues[iQuerySchemaIdx + iRow * iExp], iRow, iQuerySchemaIdx, sError );
}
if ( !bOk )
break;
// if strings and fields share one value, it might be modified by html stripper etc
// we need to use separate storage for such string attributes and fields
for ( int i = 0; i < tSchema.GetFieldsCount() && bOk; i++ )
{
int iQuerySchemaIdx = dFieldSchema[i];
if ( iQuerySchemaIdx < 0 )
tConverter.SetDefaultFieldValue(i);
else
bOk = tConverter.SetFieldValue( i, tStmt.m_dInsertValues [ iQuerySchemaIdx + iRow * iExp ], iRow, iQuerySchemaIdx );
}
if ( !bOk )
break;
tConverter.Finalize();
// do add
if ( bPq )
{
if ( !InsertToPQ ( tStmt, pIndex, pAccum, dIds, tConverter.m_tDoc, tIdLoc, tConverter.m_dStrings, pIndex->GetInternalSchema(), bReplace, sError ) )
break;
}
else
{
pIndex->AddDocument ( tConverter, bReplace, tStmt.m_sStringParam, sError, sWarning, pAccum );
dIds.Add ( tConverter.GetID() );
pAccum->AddCommand ( ReplCmd_e::RT_TRX, tStmt.m_sIndex, tStmt.m_sCluster );
}
if ( !sError.IsEmpty() )
break;
}
// fire exit
if ( !sError.IsEmpty() )
{
pIndex->RollBack ( pAccum ); // clean up collected data
tOut.ErrorEx ( EMYSQL_ERR::PARSE_ERROR, sError.cstr() );
return false;
}
dLastIds.SwapData ( dIds );
return true;
}
static void CommitAcc ( const SqlStmt_t & tStmt, cServedIndexRefPtr_c & pServed, StmtErrorReporter_i & tOut )
{
auto * pSession = session::GetClientSession();
CSphString sError;
auto & sWarning = pSession->m_tLastMeta.m_sWarning;
bool bCommit = ( pSession->m_bAutoCommit && !pSession->m_bInTransaction );
auto & dLastIds = pSession->m_dLastIds;
// no errors so far
if ( bCommit )
{
RtAccum_t * pAccum = pSession->m_tAcc.GetAcc();
RIdx_T<RtIndex_i *> pIndex { pServed };
assert ( pSession->m_tAcc.GetAcc ( pIndex, sError )==pAccum );
if ( !HandleCmdReplicate ( *pAccum ) )
{
TlsMsg::MoveError ( sError );
pIndex->RollBack ( pAccum ); // clean up collected data
tOut.Error ( "%s", sError.cstr() );
return;
}
}
int64_t iLastInsertId = 0;
if ( dLastIds.GetLength() )
iLastInsertId = dLastIds.Last();
// my OK packet
tOut.Ok ( tStmt.m_iRowsAffected, sWarning, iLastInsertId );
}
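// CALL SNIPPETS ( data, table, query [, opt AS name ... ] ) handler:
// validates the three positional arguments, maps the named options onto
// SnippetQuerySettings_t, then builds the excerpts and streams them back
// as a one-column result set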
void HandleMysqlCallSnippets ( RowBuffer_i & tOut, SqlStmt_t & tStmt )
{
StatCountCommand ( SEARCHD_COMMAND_EXCERPT );
CSphString sError;
// check arguments
// string data, string index, string query, [named opts]
if ( tStmt.m_dInsertValues.GetLength()!=3 )
{
tOut.Error ( "SNIPPETS() expects exactly 3 arguments (data, table, query)" );
return;
}
if ( tStmt.m_dInsertValues[0].m_iType!=SqlInsert_t::QUOTED_STRING && tStmt.m_dInsertValues[0].m_iType!=SqlInsert_t::CONST_STRINGS )
{
tOut.Error ( "SNIPPETS() argument 1 must be a string or a string list" );
return;
}
if ( tStmt.m_dInsertValues[1].m_iType!=SqlInsert_t::QUOTED_STRING )
{
tOut.Error ( "SNIPPETS() argument 2 must be a string" );
return;
}
if ( tStmt.m_dInsertValues[2].m_iType!=SqlInsert_t::QUOTED_STRING )
{
tOut.Error ( "SNIPPETS() argument 3 must be a string" );
return;
}
// do magics
CSphString sIndex = tStmt.m_dInsertValues[1].m_sVal;
SnippetQuerySettings_t q;
q.m_sQuery = tStmt.m_dInsertValues[2].m_sVal;
ARRAY_FOREACH ( i, tStmt.m_dCallOptNames )
{
CSphString & sOpt = tStmt.m_dCallOptNames[i];
const SqlInsert_t & v = tStmt.m_dCallOptValues[i];
sOpt.ToLower();
int iExpType = -1;
if ( sOpt=="before_match" ) { q.m_sBeforeMatch = v.m_sVal; iExpType = SqlInsert_t::QUOTED_STRING; }
else if ( sOpt=="after_match" ) { q.m_sAfterMatch = v.m_sVal; iExpType = SqlInsert_t::QUOTED_STRING; }
else if ( sOpt=="chunk_separator" || sOpt=="snippet_separator" ) { q.m_sChunkSeparator = v.m_sVal; iExpType = SqlInsert_t::QUOTED_STRING; }
else if ( sOpt=="html_strip_mode" ) { q.m_sStripMode = v.m_sVal; iExpType = SqlInsert_t::QUOTED_STRING; }
else if ( sOpt=="passage_boundary" || sOpt=="snippet_boundary" ) { q.m_ePassageSPZ = GetPassageBoundary(v.m_sVal); iExpType = SqlInsert_t::QUOTED_STRING; }
else if ( sOpt=="limit" ) { q.m_iLimit = (int)v.GetValueInt(); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="limit_words" ) { q.m_iLimitWords = (int)v.GetValueInt(); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="limit_passages" || sOpt=="limit_snippets" ) { q.m_iLimitPassages = (int)v.GetValueInt(); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="around" ) { q.m_iAround = (int)v.GetValueInt(); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="start_passage_id" || sOpt=="start_snippet_id" ) { q.m_iPassageId = (int)v.GetValueInt(); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="exact_phrase" )
{
sError.SetSprintf ( "exact_phrase is deprecated" );
break;
}
else if ( sOpt=="use_boundaries" ) { q.m_bUseBoundaries = ( v.GetValueInt()!=0 ); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="weight_order" ) { q.m_bWeightOrder = ( v.GetValueInt()!=0 ); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="query_mode" )
{
bool bQueryMode = ( v.GetValueInt()!=0 );
iExpType = SqlInsert_t::CONST_INT;
if ( !bQueryMode )
{
sError.SetSprintf ( "query_mode=0 is deprecated" );
break;
}
}
else if ( sOpt=="force_all_words" ) { q.m_bForceAllWords = ( v.GetValueInt()!=0 ); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="load_files" ) { q.m_uFilesMode = ( v.GetValueInt()!=0 )?1:0; iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="load_files_scattered" ) { q.m_uFilesMode |= ( v.GetValueInt()!=0 )?2:0; iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="allow_empty" ) { q.m_bAllowEmpty = ( v.GetValueInt()!=0 ); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="emit_zones" ) { q.m_bEmitZones = ( v.GetValueInt()!=0 ); iExpType = SqlInsert_t::CONST_INT; }
else if ( sOpt=="force_passages" || sOpt=="force_snippets" ) { q.m_bForcePassages = ( v.GetValueInt()!=0 ); iExpType = SqlInsert_t::CONST_INT; }
else
{
sError.SetSprintf ( "unknown option %s", sOpt.cstr() );
break;
}
// post-conf type check
if ( iExpType!=v.m_iType )
{
sError.SetSprintf ( "unexpected option %s type", sOpt.cstr() );
break;
}
}
if ( !sError.IsEmpty() )
{
tOut.Error ( sError.cstr() );
return;
}
if ( !sphCheckOptionsSPZ ( q, q.m_ePassageSPZ, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
q.Setup();
CSphVector<ExcerptQuery_t> dQueries;
if ( tStmt.m_dInsertValues[0].m_iType==SqlInsert_t::QUOTED_STRING )
{
auto& dQuery = dQueries.Add ();
dQuery.m_sSource = tStmt.m_dInsertValues[0].m_sVal; // OPTIMIZE?
} else
{
dQueries.Resize ( tStmt.m_dCallStrings.GetLength() );
ARRAY_FOREACH ( i, tStmt.m_dCallStrings )
{
dQueries[i].m_sSource = tStmt.m_dCallStrings[i]; // OPTIMIZE?
}
}
myinfo::SetTaskInfo ( R"(sphinxql-snippet datasize=%.1Dk query="%s")", GetSnippetDataSize ( dQueries ), q.m_sQuery.scstr ());
if ( !MakeSnippets ( sIndex, dQueries, q, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
if ( !dQueries.any_of ( [] ( const ExcerptQuery_t & tQuery ) { return tQuery.m_sError.IsEmpty(); } ) )
{
// all snippets failed; report just the last per-query error instead of all of them
for ( const auto & tQuery : dQueries )
if ( !tQuery.m_sError.IsEmpty() )
sError = tQuery.m_sError;
sError.SetSprintf ( "highlighting failed: %s", sError.cstr() );
tOut.Error ( sError.cstr() );
return;
}
// result set header packet
tOut.HeadBegin ();
tOut.HeadColumn("snippet");
tOut.HeadEnd ();
// data
for ( auto & i : dQueries )
{
FixupResultTail ( i.m_dResult );
tOut.PutArray ( i.m_dResult );
if ( !tOut.Commit() )
break;
}
tOut.Eof();
}
class KeywordsRequestBuilder_c : public RequestBuilder_i
{
public:
KeywordsRequestBuilder_c ( const GetKeywordsSettings_t & tSettings, const CSphString & sTerm );
void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final;
protected:
const GetKeywordsSettings_t & m_tSettings;
const CSphString & m_sTerm;
};
class KeywordsReplyParser_c : public ReplyParser_i
{
public:
KeywordsReplyParser_c ( bool bGetStats, CSphVector<CSphKeywordInfo> & dKeywords );
bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & tAgent ) const final;
bool m_bStats;
CSphVector<CSphKeywordInfo> & m_dKeywords;
};
static void LimitKeywords ( int iLimit, CSphVector<CSphKeywordInfo> & dKeywords );
static void SortKeywords ( const GetKeywordsSettings_t & tSettings, CSphVector<CSphKeywordInfo> & dKeywords );
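// gather keywords either from a single local index or, for a distributed one,
// from all its local parts plus remote agents; the combined set is then
// deduplicated (when more than one source replied), sorted, and limited
// according to tSettings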
bool DoGetKeywords ( const CSphString & sIndex, const CSphString & sQuery, const GetKeywordsSettings_t & tSettings, CSphVector <CSphKeywordInfo> & dKeywords, CSphString & sError, SearchFailuresLog_c & tFailureLog )
{
auto pLocal = GetServed ( sIndex );
auto pDistributed = GetDistr ( sIndex );
if ( !pLocal && !pDistributed )
{
sError.SetSprintf ( "no such table %s", sIndex.cstr() );
return false;
}
bool bOk = false;
// just local plain or template index
if ( pLocal )
bOk = RIdx_c(pLocal)->GetKeywords ( dKeywords, sQuery.cstr(), tSettings, &sError );
else
{
// FIXME!!! g_iDistThreads thread pool for locals.
// locals
const StrVec_t & dLocals = pDistributed->m_dLocal;
CSphVector<CSphKeywordInfo> dKeywordsLocal;
for ( const CSphString &sLocal : dLocals )
{
auto pServed = GetServed ( sLocal );
if ( !pServed )
{
tFailureLog.Submit ( sLocal.cstr(), sIndex.cstr(), "missing table" );
continue;
}
dKeywordsLocal.Resize(0);
if ( RIdx_c ( pServed )->GetKeywords ( dKeywordsLocal, sQuery.cstr(), tSettings, &sError ) )
dKeywords.Append ( dKeywordsLocal );
else
tFailureLog.SubmitEx ( sLocal, sIndex.cstr (), "keyword extraction failed: %s", sError.cstr () );
}
// send requests to remote agents off-thread
VecRefPtrsAgentConn_t dAgents;
// fixme! We don't need all hosts here, only usual selected mirrors
pDistributed->GetAllHosts ( dAgents );
int iAgentsReply = 0;
if ( !dAgents.IsEmpty() )
{
// connect to remote agents and query them
KeywordsRequestBuilder_c tReqBuilder ( tSettings, sQuery );
KeywordsReplyParser_c tParser ( tSettings.m_bStats, dKeywords );
iAgentsReply = PerformRemoteTasks ( dAgents, &tReqBuilder, &tParser );
for ( const AgentConn_t * pAgent : dAgents )
if ( !pAgent->m_bSuccess && !pAgent->m_sFailure.IsEmpty() )
tFailureLog.SubmitEx ( pAgent->m_tDesc.m_sIndexes, sIndex.cstr(),
"agent %s: %s", pAgent->m_tDesc.GetMyUrl().cstr(), pAgent->m_sFailure.cstr() );
}
// process result sets
if ( dLocals.GetLength() + iAgentsReply>1 )
UniqKeywords ( dKeywords );
bOk = true;
}
SortKeywords ( tSettings, dKeywords );
if ( tSettings.m_iExpansionLimit )
LimitKeywords ( tSettings.m_iExpansionLimit, dKeywords );
return bOk;
}
void HandleMysqlCallKeywords ( RowBuffer_i & tOut, SqlStmt_t & tStmt, CSphString & sWarning )
{
StatCountCommand ( SEARCHD_COMMAND_KEYWORDS );
CSphString sError;
// string query, string index, [bool hits] || [value as option_name, ...]
int iArgs = tStmt.m_dInsertValues.GetLength();
if ( iArgs<2
|| iArgs>3
|| tStmt.m_dInsertValues[0].m_iType!=SqlInsert_t::QUOTED_STRING
|| tStmt.m_dInsertValues[1].m_iType!=SqlInsert_t::QUOTED_STRING
|| ( iArgs==3 && tStmt.m_dInsertValues[2].m_iType!=SqlInsert_t::CONST_INT ) )
{
tOut.Error ( "bad argument count or types in KEYWORDS() call" );
return;
}
GetKeywordsSettings_t tSettings;
tSettings.m_bStats = ( iArgs==3 && tStmt.m_dInsertValues[2].GetValueInt()!=0 );
ARRAY_FOREACH ( i, tStmt.m_dCallOptNames )
{
CSphString & sOpt = tStmt.m_dCallOptNames[i];
sOpt.ToLower ();
const auto & sVal = tStmt.m_dCallOptValues[i].m_sVal;
bool bEnabled = ( tStmt.m_dCallOptValues[i].GetValueInt()!=0 );
bool bOptInt = true;
if ( sOpt=="stats" )
tSettings.m_bStats = bEnabled;
else if ( sOpt=="fold_lemmas" )
tSettings.m_bFoldLemmas = bEnabled;
else if ( sOpt=="fold_blended" )
tSettings.m_bFoldBlended = bEnabled;
else if ( sOpt=="fold_wildcards" )
tSettings.m_bFoldWildcards = bEnabled;
else if ( sOpt=="expansion_limit" )
tSettings.m_iExpansionLimit = int ( tStmt.m_dCallOptValues[i].GetValueInt() );
else if ( sOpt=="sort_mode" )
{
// FIXME!!! add more sorting modes
if ( sVal!="docs" && sVal!="hits" )
{
sError.SetSprintf ( "unknown option %s mode '%s'", sOpt.cstr(), sVal.cstr() );
tOut.Error ( sError.cstr() );
return;
}
tSettings.m_bSortByDocs = sVal=="docs";
tSettings.m_bSortByHits = sVal=="hits";
bOptInt = false;
}
else if ( sOpt=="jieba_mode" )
{
if ( !StrToJiebaMode ( tSettings.m_eJiebaMode, sVal, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
bOptInt = false;
}
else
{
sError.SetSprintf ( "unknown option %s", sOpt.cstr () );
tOut.Error ( sError.cstr() );
return;
}
// post-conf type check
if ( bOptInt && tStmt.m_dCallOptValues[i].m_iType!=SqlInsert_t::CONST_INT )
{
sError.SetSprintf ( "unexpected option %s type", sOpt.cstr () );
tOut.Error ( sError.cstr () );
return;
}
}
const CSphString & sTerm = tStmt.m_dInsertValues[0].m_sVal;
const CSphString & sIndex = tStmt.m_dInsertValues[1].m_sVal;
CSphVector<CSphKeywordInfo> dKeywords;
SearchFailuresLog_c tFailureLog;
if ( !DoGetKeywords ( sIndex, sTerm, tSettings, dKeywords, sError, tFailureLog ) )
{
tOut.Error ( sError.cstr() );
return;
}
// result set header packet
tOut.HeadBegin();
tOut.HeadColumn("qpos");
tOut.HeadColumn("tokenized");
tOut.HeadColumn("normalized");
if ( tSettings.m_bStats )
{
tOut.HeadColumn("docs");
tOut.HeadColumn("hits");
}
tOut.HeadEnd();
// data
char sBuf[16];
ARRAY_FOREACH ( i, dKeywords )
{
snprintf ( sBuf, sizeof(sBuf), "%d", dKeywords[i].m_iQpos );
tOut.PutString ( sBuf );
tOut.PutString ( dKeywords[i].m_sTokenized );
tOut.PutString ( dKeywords[i].m_sNormalized );
if ( tSettings.m_bStats )
{
snprintf ( sBuf, sizeof(sBuf), "%d", dKeywords[i].m_iDocs );
tOut.PutString ( sBuf );
snprintf ( sBuf, sizeof(sBuf), "%d", dKeywords[i].m_iHits );
tOut.PutString ( sBuf );
}
if ( !tOut.Commit() )
break;
}
// put network errors and warnings to meta as warning
int iWarnings = 0;
if ( !tFailureLog.IsEmpty() )
{
iWarnings = tFailureLog.GetReportsCount();
StringBuilder_c sErrorBuf;
tFailureLog.BuildReport ( sErrorBuf );
sErrorBuf.MoveTo ( sWarning );
sphWarning ( "%s", sWarning.cstr() );
}
tOut.Eof ( false, iWarnings );
}
KeywordsRequestBuilder_c::KeywordsRequestBuilder_c ( const GetKeywordsSettings_t & tSettings, const CSphString & sTerm )
: m_tSettings ( tSettings )
, m_sTerm ( sTerm )
{
}
void KeywordsRequestBuilder_c::BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const
{
const CSphString & sIndexes = tAgent.m_tDesc.m_sIndexes;
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS );
tOut.SendString ( m_sTerm.cstr() );
tOut.SendString ( sIndexes.cstr() );
tOut.SendInt ( m_tSettings.m_bStats );
tOut.SendInt ( m_tSettings.m_bFoldLemmas );
tOut.SendInt ( m_tSettings.m_bFoldBlended );
tOut.SendInt ( m_tSettings.m_bFoldWildcards );
tOut.SendInt ( m_tSettings.m_iExpansionLimit );
tOut.SendInt ( (int)m_tSettings.m_eJiebaMode );
}
KeywordsReplyParser_c::KeywordsReplyParser_c ( bool bGetStats, CSphVector<CSphKeywordInfo> & dKeywords )
: m_bStats ( bGetStats )
, m_dKeywords ( dKeywords )
{
}
bool KeywordsReplyParser_c::ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const
{
int iWords = tReq.GetInt();
int iLen = m_dKeywords.GetLength();
m_dKeywords.Resize ( iWords + iLen );
for ( int i=0; i<iWords; i++ )
{
CSphKeywordInfo & tWord = m_dKeywords[i + iLen];
tWord.m_sTokenized = tReq.GetString();
tWord.m_sNormalized = tReq.GetString();
tWord.m_iQpos = tReq.GetInt();
if ( m_bStats )
{
tWord.m_iDocs = tReq.GetInt();
tWord.m_iHits = tReq.GetInt();
}
}
return true;
}
struct KeywordSorterDocs_fn
{
bool IsLess ( const CSphKeywordInfo & a, const CSphKeywordInfo & b ) const
{
return ( ( a.m_iQpos<b.m_iQpos )
|| ( a.m_iQpos==b.m_iQpos && a.m_iDocs>b.m_iDocs )
|| ( a.m_iQpos==b.m_iQpos && a.m_iDocs==b.m_iDocs && a.m_sNormalized<b.m_sNormalized ) );
}
};
void SortKeywords ( const GetKeywordsSettings_t & tSettings, CSphVector<CSphKeywordInfo> & dKeywords )
{
if ( !tSettings.m_bSortByDocs && !tSettings.m_bSortByHits )
return;
if ( tSettings.m_bSortByHits )
dKeywords.Sort ( KeywordSorter_fn() );
else
dKeywords.Sort ( KeywordSorterDocs_fn() );
}
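// functor for sphUniq() that keeps at most m_iLimit keywords per query position:
// IsEq() reports "duplicate" (so the element gets dropped) once the per-qpos
// counter reaches the limit, and resets the counter when qpos changes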
struct KeywordLimiter_fn
{
const int m_iLimit = 0;
int m_iCount = 0;
KeywordLimiter_fn ( int iLimit )
: m_iLimit ( iLimit )
{}
bool IsEq ( const CSphKeywordInfo & tKw1, const CSphKeywordInfo & tKw2 )
{
if ( tKw1.m_iQpos!=tKw2.m_iQpos )
{
m_iCount = 0;
return false;
}
m_iCount++;
return ( m_iCount>=m_iLimit );
}
};
void LimitKeywords ( int iLimit, CSphVector<CSphKeywordInfo> & dKeywords )
{
assert ( iLimit>0 );
KeywordLimiter_fn tLimit ( iLimit );
int iLen = sphUniq ( dKeywords.Begin(), dKeywords.GetLength(), tLimit );
dKeywords.Resize ( iLen );
}
// sort by distance asc, document count desc, ABC asc
struct CmpDistDocABC_fn
{
const char * m_pBuf;
explicit CmpDistDocABC_fn ( const char * pBuf ) : m_pBuf ( pBuf ) {}
inline bool IsLess ( const SuggestWord_t & a, const SuggestWord_t & b ) const
{
if ( a.m_iDistance==b.m_iDistance && a.m_iDocs==b.m_iDocs )
{
return ( sphDictCmpStrictly ( m_pBuf + a.m_iNameOff, a.m_iLen, m_pBuf + b.m_iNameOff, b.m_iLen )<0 );
}
if ( a.m_iDistance==b.m_iDistance )
return a.m_iDocs>b.m_iDocs; // strict comparison keeps IsLess a strict weak ordering
return a.m_iDistance<b.m_iDistance;
}
};
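// format CALL SUGGEST/QSUGGEST results: either a compact name/value "oneline" set
// with comma-separated suggestions (plus optional distance/docs rows), or the
// regular one-row-per-suggestion layout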
static void SuggestSendResult ( const SuggestArgs_t & tArgs, SuggestResultSet_t & tRes, const CSphString & sSentence, RowBuffer_i & tOut )
{
if ( tArgs.m_bResultOneline )
{
// let's resort by alphabet to better compare result sets
CmpDistDocABC_fn tCmp ( (const char *)( tRes.m_dBuf.Begin() ) );
tRes.m_dMatched.Sort ( tCmp );
// result set header packet
tOut.HeadBegin ();
tOut.HeadColumn ( "name" );
tOut.HeadColumn ( "value" );
tOut.HeadEnd ();
StringBuilder_c sBuf ( "," );
for ( auto& dMatched : tRes.m_dMatched )
sBuf << (const char*) tRes.m_dBuf.Begin() + dMatched.m_iNameOff;
tOut.PutString ( "suggests" );
tOut.PutString ( sBuf.cstr() );
if ( !tOut.Commit() )
return;
if ( tArgs.m_bResultStats )
{
sBuf.Clear ();
sBuf.StartBlock ( "," );
for ( auto &dMatched : tRes.m_dMatched )
sBuf.Appendf ( "%d", dMatched.m_iDistance );
tOut.PutString ( "distance" );
tOut.PutString ( sBuf.cstr () );
if ( !tOut.Commit() )
return;
sBuf.Clear ();
sBuf.StartBlock ( "," );
for ( auto &dMatched : tRes.m_dMatched )
sBuf.Appendf ( "%d", dMatched.m_iDocs );
tOut.PutString ( "docs" );
tOut.PutString ( sBuf );
if ( !tOut.Commit() )
return;
}
} else
{
// result set header packet
tOut.HeadBegin ();
tOut.HeadColumn ( "suggest" );
if ( tArgs.m_bResultStats )
{
tOut.HeadColumn ( "distance" );
tOut.HeadColumn ( "docs" );
}
tOut.HeadEnd ();
StringBuilder_c sBuf;
auto * szResult = (const char *)( tRes.m_dBuf.Begin() );
ARRAY_FOREACH ( i, tRes.m_dMatched )
{
const SuggestWord_t & tWord = tRes.m_dMatched[i];
if ( tArgs.m_bSentence && !sSentence.IsEmpty() )
{
sBuf.Clear();
sBuf.Appendf ( "%s %s", sSentence.cstr(), ( szResult + tWord.m_iNameOff ) );
tOut.PutString ( sBuf );
} else
{
tOut.PutString ( szResult + tWord.m_iNameOff );
}
if ( tArgs.m_bResultStats )
{
tOut.PutNumAsString ( tWord.m_iDistance );
tOut.PutNumAsString ( tWord.m_iDocs );
}
if ( !tOut.Commit() )
return;
}
}
tOut.Eof();
}
static bool SuggestLocalIndexGet ( const cServedIndexRefPtr_c & pServed, const SuggestArgs_t & tArgs, const char * sWord, SuggestResult_t & tRes, CSphString & sError )
{
RIdx_c pIdx { pServed };
if ( !pIdx->GetSettings().m_iMinInfixLen || !pIdx->GetDictionary()->GetSettings().m_bWordDict )
{
sError.SetSprintf ( "suggests work only for keywords dictionary with infix enabled" );
return false;
}
if ( tRes.SetWord ( sWord, pIdx->GetQueryTokenizer(), tArgs.m_bQueryMode, tArgs.m_bSentence ) )
pIdx->GetSuggest ( tArgs, tRes );
return true;
}
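// append matches from tSrc to tDst, copying the zero-terminated names out of the
// source buffer and rebasing their offsets onto tDst's own buffer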
static void MergeResultSets ( const SuggestResult_t & tSrc, SuggestResult_t & tDst )
{
if ( tDst.m_sSentence.IsEmpty() )
tDst.m_sSentence = tSrc.m_sSentence;
const BYTE * sBuf = ( tSrc.m_dBuf.Begin() );
for ( const auto & tSrcWord : tSrc.m_dMatched )
{
auto & tDstWord = tDst.m_dMatched.Add();
tDstWord = tSrcWord;
tDstWord.m_iNameOff = tDst.m_dBuf.GetLength();
BYTE * pDst = tDst.m_dBuf.AddN ( tSrcWord.m_iLen + 1 );
memcpy ( pDst, sBuf + tSrcWord.m_iNameOff, tSrcWord.m_iLen + 1 );
}
}
static void UniqResultSets ( int iLimit, SuggestResult_t & tRes )
{
SuggestMergeDocs ( tRes.m_dMatched );
CmpDistDocABC_fn tCmp ( (const char *)( tRes.m_dBuf.Begin() ) );
tRes.m_dMatched.Sort ( tCmp );
tRes.Flattern ( iLimit );
}
/// Suggest Flags
enum class SuggestFlags_e : DWORD
{
NON_CHAR_ALLOWED = 1UL << 0,
SENTENCE = 1UL << 1,
QUERY_MODE = 1UL << 2,
};
static void SendSuggestReply ( const SuggestResult_t & tRes, ISphOutputBuffer & tOut )
{
auto tReply = APIAnswer ( tOut, VER_COMMAND_SUGGEST );
tOut.SendString ( tRes.m_sSentence.cstr() );
const BYTE * pBuf = tRes.m_dBuf.Begin();
tOut.SendInt ( tRes.m_dMatched.GetLength() );
for ( const auto & tWord : tRes.m_dMatched )
{
tOut.SendInt ( tWord.m_iDistance );
tOut.SendInt ( tWord.m_iDocs );
tOut.SendInt ( tWord.m_iLen );
tOut.SendBytes ( pBuf + tWord.m_iNameOff, tWord.m_iLen );
}
}
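// binary API endpoint for SUGGEST: deserializes the arguments (including the
// packed flags dword), runs the suggester against a single local index,
// and serializes the matched words back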
void HandleCommandSuggest ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_SUGGEST, tOut ) )
return;
// parse request
CSphString sIndex = tReq.GetString();
CSphString sWord = tReq.GetString();
SuggestArgs_t tArgs;
tArgs.m_iLimit = tReq.GetInt();
tArgs.m_iDeltaLen = tReq.GetInt();
tArgs.m_iQueueLen = tReq.GetInt();
tArgs.m_iRejectThr = tReq.GetInt();
tArgs.m_iMaxEdits = tReq.GetInt();
DWORD uFlags = tReq.GetDword();
tArgs.m_bNonCharAllowed = !!( uFlags & (DWORD)SuggestFlags_e::NON_CHAR_ALLOWED );
tArgs.m_bSentence = !!( uFlags & (DWORD)SuggestFlags_e::SENTENCE );
tArgs.m_bQueryMode = !!( uFlags & (DWORD)SuggestFlags_e::QUERY_MODE );
if ( tReq.GetError() )
{
SendErrorReply ( tOut, "invalid or truncated request" );
return;
}
auto pServed = GetServed ( sIndex );
if ( !pServed )
{
SendErrorReply ( tOut, "missed table %s", sIndex.cstr() );
return;
}
CSphString sError;
SuggestResult_t tRes;
if ( !SuggestLocalIndexGet ( pServed, tArgs, sWord.cstr(), tRes, sError ) )
{
SendErrorReply ( tOut, "%s", sError.cstr() );
return;
}
SendSuggestReply ( tRes, tOut );
}
class SuggestRequestBuilder_c : public RequestBuilder_i
{
public:
SuggestRequestBuilder_c ( const SuggestArgs_t & tArgs, const char * sWord )
: m_tArgs ( tArgs )
, m_sWord ( sWord )
{}
void BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const final
{
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_SUGGEST, VER_COMMAND_SUGGEST );
tOut.SendString ( tAgent.m_tDesc.m_sIndexes.cstr() );
tOut.SendString ( m_sWord );
tOut.SendInt ( m_tArgs.m_iLimit );
tOut.SendInt ( m_tArgs.m_iDeltaLen );
tOut.SendInt ( m_tArgs.m_iQueueLen );
tOut.SendInt ( m_tArgs.m_iRejectThr );
tOut.SendInt ( m_tArgs.m_iMaxEdits );
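// pack the bool options into a bitmask; multiplying a flag by a bool
// yields either the flag value or zero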
DWORD uFlags = 0;
uFlags |= (DWORD)SuggestFlags_e::NON_CHAR_ALLOWED * m_tArgs.m_bNonCharAllowed;
uFlags |= (DWORD)SuggestFlags_e::SENTENCE * m_tArgs.m_bSentence;
uFlags |= (DWORD)SuggestFlags_e::QUERY_MODE * m_tArgs.m_bQueryMode;
tOut.SendDword ( uFlags );
}
protected:
const SuggestArgs_t & m_tArgs;
const char * m_sWord;
};
class SuggestReplyParser_c : public ReplyParser_i
{
public:
SuggestReplyParser_c ( SuggestResultSet_t & tRes, CSphString & sSentence )
: m_tRes ( tRes )
, m_sSentence ( sSentence )
{}
bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const final
{
CSphString sSentence = tReq.GetString();
if ( m_sSentence.IsEmpty() )
m_sSentence = sSentence;
int iWords = tReq.GetInt();
int iOff = m_tRes.m_dMatched.GetLength();
m_tRes.m_dMatched.Resize ( iOff + iWords );
for ( int i=0; i<iWords; i++ )
{
SuggestWord_t & tWord = m_tRes.m_dMatched[iOff + i];
tWord.m_iDistance = tReq.GetInt();
tWord.m_iDocs = tReq.GetInt();
int iWordLen = tReq.GetInt();
tWord.m_iNameOff = m_tRes.m_dBuf.GetLength();
tWord.m_iLen = iWordLen + 1;
BYTE * pDst = m_tRes.m_dBuf.AddN ( iWordLen + 1 );
tReq.GetBytes ( pDst, iWordLen );
pDst[iWordLen] = '\0';
tWord.m_iNameHash = sphCRC32 ( pDst, iWordLen );
}
return true;
}
SuggestResultSet_t & m_tRes;
CSphString & m_sSentence;
};
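// run SUGGEST over a distributed index: query every local part, then all remote
// agents, merging the partial sets; if more than one source replied, the combined
// set is deduplicated and re-limited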
static bool SuggestDistIndexGet ( const cDistributedIndexRefPtr_t & pDistributed, const CSphString & sIndex, const SuggestArgs_t & tArgs, const char * sWord, SuggestResult_t & tRes, CSphString & sError )
{
const StrVec_t & dLocals = pDistributed->m_dLocal;
for ( const CSphString & sLocal : dLocals )
{
auto pServed = GetServed ( sLocal );
if ( !pServed )
{
sError.SetSprintf ( "missing table %s at %s", sLocal.cstr(), sIndex.cstr() );
return false;
}
SuggestResult_t tCur;
if ( !SuggestLocalIndexGet ( pServed, tArgs, sWord, tCur, sError ) )
return false;
MergeResultSets ( tCur, tRes );
}
// send requests to remote agents off-thread
VecRefPtrsAgentConn_t dAgents;
// fixme! We don't need all hosts here, only usual selected mirrors
pDistributed->GetAllHosts ( dAgents );
int iAgentsReply = 0;
if ( !dAgents.IsEmpty() )
{
// connect to remote agents and query them
SuggestRequestBuilder_c tReqBuilder ( tArgs, sWord );
SuggestReplyParser_c tParser ( tRes, tRes.m_sSentence );
iAgentsReply = PerformRemoteTasks ( dAgents, &tReqBuilder, &tParser );
for ( const AgentConn_t * pAgent : dAgents )
if ( !pAgent->m_bSuccess && !pAgent->m_sFailure.IsEmpty() )
sError.SetSprintf ( "agent %s: %s", pAgent->m_tDesc.GetMyUrl().cstr(), pAgent->m_sFailure.cstr() );
}
// sort and re-limit result sets
if ( ( iAgentsReply + dLocals.GetLength() )>1 )
UniqResultSets ( tArgs.m_iLimit, tRes );
return true;
}
void HandleMysqlCallSuggest ( RowBuffer_i & tOut, SqlStmt_t & tStmt, bool bQueryMode )
{
StatCountCommand ( SEARCHD_COMMAND_SUGGEST );
CSphString sError;
// string query, string index, [value as option_name, ...]
int iArgs = tStmt.m_dInsertValues.GetLength ();
if ( iArgs<2
|| iArgs>3
|| tStmt.m_dInsertValues[0].m_iType!=SqlInsert_t::QUOTED_STRING
|| tStmt.m_dInsertValues[1].m_iType!=SqlInsert_t::QUOTED_STRING
|| ( iArgs==3 && tStmt.m_dInsertValues[2].m_iType!=SqlInsert_t::CONST_INT ) )
{
tOut.Error ( "bad argument count or types in KEYWORDS() call" );
return;
}
SuggestArgs_t tArgs;
SuggestResult_t tRes;
const char * sWord = tStmt.m_dInsertValues[0].m_sVal.cstr();
tArgs.m_bQueryMode = bQueryMode;
ARRAY_FOREACH ( i, tStmt.m_dCallOptNames )
{
CSphString & sOpt = tStmt.m_dCallOptNames[i];
sOpt.ToLower ();
int iTokType = SqlInsert_t::CONST_INT;
if ( sOpt=="limit" )
{
tArgs.m_iLimit = int ( tStmt.m_dCallOptValues[i].GetValueInt() );
} else if ( sOpt=="delta_len" )
{
tArgs.m_iDeltaLen = int ( tStmt.m_dCallOptValues[i].GetValueInt() );
} else if ( sOpt=="max_matches" )
{
tArgs.m_iQueueLen = int ( tStmt.m_dCallOptValues[i].GetValueInt() );
} else if ( sOpt=="reject" )
{
tArgs.m_iRejectThr = int ( tStmt.m_dCallOptValues[i].GetValueInt() );
} else if ( sOpt=="max_edits" )
{
tArgs.m_iMaxEdits = int ( tStmt.m_dCallOptValues[i].GetValueInt() );
} else if ( sOpt=="result_line" )
{
tArgs.m_bResultOneline = ( tStmt.m_dCallOptValues[i].GetValueInt()!=0 );
} else if ( sOpt=="result_stats" )
{
tArgs.m_bResultStats = ( tStmt.m_dCallOptValues[i].GetValueInt()!=0 );
} else if ( sOpt=="non_char" )
{
tArgs.m_bNonCharAllowed = ( tStmt.m_dCallOptValues[i].GetValueInt()!=0 );
} else if ( sOpt=="sentence" )
{
tArgs.m_bSentence = ( tStmt.m_dCallOptValues[i].GetValueInt()!=0 );
} else
{
sError.SetSprintf ( "unknown option %s", sOpt.cstr () );
tOut.Error ( sError.cstr () );
return;
}
// post-conf type check
if ( tStmt.m_dCallOptValues[i].m_iType!=iTokType )
{
sError.SetSprintf ( "unexpected option %s type", sOpt.cstr () );
tOut.Error ( sError.cstr () );
return;
}
}
const CSphString & sIndex = tStmt.m_dInsertValues[1].m_sVal;
{
auto pLocal = GetServed ( sIndex );
auto pDistributed = GetDistr ( sIndex );
if ( !pLocal && !pDistributed )
{
sError.SetSprintf ( "no such table %s", sIndex.cstr() );
tOut.Error ( sError.cstr () );
return;
}
if ( pLocal && !SuggestLocalIndexGet ( pLocal, tArgs, sWord, tRes, sError ) )
{
tOut.Error ( sError.cstr () );
return;
}
if ( pDistributed && !SuggestDistIndexGet ( pDistributed, sIndex, tArgs, sWord, tRes, sError ) )
{
tOut.Error ( sError.cstr () );
return;
}
}
SuggestSendResult ( tArgs, tRes, tRes.m_sSentence, tOut );
}
static CSphString DescribeAttributeProperties ( const CSphColumnInfo & tAttr )
{
StringBuilder_c sProps(" ");
if ( tAttr.IsColumnar() )
sProps << "columnar";
if ( tAttr.IsIndexedKNN() )
sProps << "knn";
if ( tAttr.IsIndexedSI() )
sProps << "indexed";
if ( tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_STORED )
sProps << "fast_fetch";
if ( tAttr.IsColumnar() && tAttr.m_eAttrType==SPH_ATTR_STRING && !(tAttr.m_uAttrFlags & CSphColumnInfo::ATTR_COLUMNAR_HASHES) )
sProps << "no_hash";
return sProps.cstr();
}
static void AddFieldDesc ( VectorLike & dOut, const CSphColumnInfo & tField, const CSphSchema & tSchema )
{
if ( !dOut.MatchAdd ( tField.m_sName.cstr() ) )
return;
const CSphColumnInfo * pAttr = tSchema.GetAttr ( tField.m_sName.cstr() );
dOut.Add ( pAttr ? "string" : "text" );
StringBuilder_c sProperties ( " " );
DWORD uFlags = tField.m_uFieldFlags;
if ( uFlags & CSphColumnInfo::FIELD_INDEXED )
sProperties << "indexed";
if ( uFlags & CSphColumnInfo::FIELD_STORED )
sProperties << "stored";
if ( pAttr )
{
sProperties << "attribute";
CSphString sProps = DescribeAttributeProperties ( *pAttr );
if ( !sProps.IsEmpty() )
sProperties << sProps;
}
dOut.Add ( sProperties.cstr () );
}
static void AddAttributeDesc ( VectorLike & dOut, const CSphColumnInfo & tAttr, const CSphSchema & tSchema )
{
if ( sphIsInternalAttr ( tAttr ) )
return;
if ( tSchema.GetField ( tAttr.m_sName.cstr() ) )
return; // already described it as a field property
if ( dOut.MatchAdd ( tAttr.m_sName.cstr() ) )
{
if ( tAttr.m_eAttrType==SPH_ATTR_INTEGER && tAttr.m_tLocator.m_iBitCount!=ROWITEM_BITS && tAttr.m_tLocator.m_iBitCount>0 )
{
StringBuilder_c sName;
sName.Sprintf ( "%s:%d", sphTypeName ( tAttr.m_eAttrType ), tAttr.m_tLocator.m_iBitCount );
dOut.Add ( sName.cstr() );
} else
dOut.Add ( sphTypeName ( tAttr.m_eAttrType ) );
dOut.Add ( DescribeAttributeProperties(tAttr) );
}
}
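// DESC output matching MySQL's SHOW COLUMNS layout (Field/Type/Null/Key/Default/Extra),
// presumably so that generic MySQL clients can parse it: id first, then fields,
// then the remaining plain attributes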
void ShowFields ( VectorLike& dOut, const CSphSchema& tSchema )
{
// result set header packet
dOut.SetColNames ( { "Field", "Type", "Null", "Key", "Default", "Extra" } );
auto Tail = [&dOut](const auto& tCol) { dOut.Add (tCol); dOut.Add ( "NO" ); dOut.Add ( "" ); dOut.Add ( "" ); dOut.Add ( "" ); };
assert ( tSchema.GetAttr ( 0 ).m_sName == sphGetDocidName() );
const auto& tId = tSchema.GetAttr ( 0 );
if ( dOut.MatchAdd ( tId.m_sName.cstr() ) )
Tail ( sphTypeName ( tId.m_eAttrType ) );
for ( int i = 0; i < tSchema.GetFieldsCount(); ++i )
{
const auto& tField = tSchema.GetField ( i );
if ( !dOut.MatchAdd ( tField.m_sName.cstr() ) )
continue;
const CSphColumnInfo* pAttr = tSchema.GetAttr ( tField.m_sName.cstr() );
Tail ( pAttr ? "string" : "text" );
}
for ( int i = 1; i < tSchema.GetAttrsCount(); ++i ) // from 1, as 0 is docID and already emitted
{
const auto& tAttr = tSchema.GetAttr ( i );
if ( sphIsInternalAttr ( tAttr ) )
continue;
if ( tAttr.m_eAttrType==SPH_ATTR_TOKENCOUNT )
continue;
if ( tSchema.GetField ( tAttr.m_sName.cstr() ) )
continue; // already described it as a field property
if ( !dOut.MatchAdd ( tAttr.m_sName.cstr() ) )
continue;
if ( tAttr.m_eAttrType == SPH_ATTR_INTEGER && tAttr.m_tLocator.m_iBitCount != ROWITEM_BITS && tAttr.m_tLocator.m_iBitCount > 0 )
Tail ( SphSprintf ( "%s:%d", sphTypeName ( tAttr.m_eAttrType ), tAttr.m_tLocator.m_iBitCount ) );
else
Tail ( sphTypeName ( tAttr.m_eAttrType ) );
}
}
void DescribeLocalSchema ( VectorLike & dOut, const CSphSchema & tSchema, bool bIsTemplate, bool bShowFields )
{
if ( bShowFields )
return ShowFields ( dOut, tSchema );
// result set header packet
dOut.SetColNames ( { "Field", "Type", "Properties" } );
// id comes before fields
if ( !bIsTemplate )
{
assert ( tSchema.GetAttr(0).m_sName==sphGetDocidName() );
AddAttributeDesc ( dOut, tSchema.GetAttr(0), tSchema );
}
for ( int i = 0; i<tSchema.GetFieldsCount (); ++i )
AddFieldDesc ( dOut, tSchema.GetField(i), tSchema );
for ( int i = 1; i<tSchema.GetAttrsCount (); ++i )
AddAttributeDesc ( dOut, tSchema.GetAttr(i), tSchema );
}
void DescribeDistributedSchema ( VectorLike& dOut, const cDistributedIndexRefPtr_t& pDistr )
{
// result set header packet
dOut.SetColNames ( { "Agent", "Type" } );
for ( const auto & sIdx : pDistr->m_dLocal )
dOut.MatchTuplet( sIdx.cstr (), "local" );
ARRAY_CONSTFOREACH ( i, pDistr->m_dAgents )
{
MultiAgentDescRefPtr_c pMultiAgent = pDistr->m_dAgents[i];
const MultiAgentDesc_c & tMultiAgent = *pMultiAgent;
if ( tMultiAgent.IsHA () )
{
int iNumMultiAgents = tMultiAgent.GetLength();
for ( int j = 0; j < iNumMultiAgents; j++ )
{
const AgentDesc_t & tDesc = tMultiAgent[j];
StringBuilder_c sValue;
sValue << tDesc.GetMyUrl().cstr() << ":" << tDesc.m_sIndexes.cstr();
dOut.MatchTupletf ( sValue.cstr (), "%s_%d_mirror_%d",
tDesc.m_bBlackhole ? "blackhole" : "remote", i+1, j+1 );
}
} else
{
const AgentDesc_t & tDesc = tMultiAgent[0];
StringBuilder_c sValue;
sValue << tDesc.GetMyUrl ().cstr () << ":" << tDesc.m_sIndexes.cstr ();
dOut.MatchTupletf ( sValue.cstr (), "%s_%d", tDesc.m_bBlackhole ? "blackhole" : "remote", i+1 );
}
}
}
inline static bool ClusterFlavour () noexcept
{
return !g_sClusterUser.IsEmpty () && session::GetClientSession ()->m_sUser==g_sClusterUser;
}
void HandleMysqlDescribe ( RowBuffer_i & tOut, const SqlStmt_t * pStmt )
{
auto & tStmt = *pStmt;
VectorLike dOut ( tStmt.m_sStringParam, 0 );
auto sName = tStmt.m_sIndex;
if ( ClusterFlavour() )
{
auto dParts = sphSplit( tStmt.m_sIndex.cstr(), ":");
if ( dParts.GetLength()>1 )
sName = dParts[1];
}
auto pServed = GetServed ( sName );
if ( pServed )
{
// data
const CSphSchema *pSchema = &RIdx_c(pServed)->GetMatchSchema ();
bool bNeedInternal = false;
if ( tStmt.m_iIntParam==SqlInsert_t::TABLE ) // user wants internal schema instead
bNeedInternal = true;
if ( tStmt.m_dStringSubkeys.GetLength()==1 && tStmt.m_dStringSubkeys[0].EqN(".table") )
bNeedInternal = true;
bool bShowFields = tStmt.m_iIntParam == -2; // -2 emitted in parser
if ( bNeedInternal && ServedDesc_t::IsMutable ( pServed ) && !bShowFields )
{
RIdx_T<const RtIndex_i*> pRtIndex { pServed };
pSchema = &pRtIndex->GetInternalSchema();
}
const CSphSchema &tSchema = *pSchema;
assert ( pServed->m_eType==IndexType_e::TEMPLATE || tSchema.GetAttr(0).m_sName==sphGetDocidName() );
DescribeLocalSchema ( dOut, tSchema, pServed->m_eType==IndexType_e::TEMPLATE, bShowFields );
} else
{
auto pDistr = GetDistr ( sName );
if ( !pDistr )
{
tOut.ErrorAbsent ( "no such table '%s'", tStmt.m_sIndex.cstr () );
return;
}
DescribeDistributedSchema ( dOut, pDistr );
}
tOut.DataTable ( dOut );
}
struct NamedIndexType_t
{
CSphString m_sName;
CSphString m_sCluster;
IndexType_e m_eType;
NamedIndexType_t() = default;
NamedIndexType_t ( NamedIndexType_t && ) noexcept = default;
NamedIndexType_t & operator= ( NamedIndexType_t && ) noexcept = default;
NamedIndexType_t ( const NamedIndexType_t & ) noexcept = default;
NamedIndexType_t & operator= ( const NamedIndexType_t & ) noexcept = default;
NamedIndexType_t ( CSphString sName, CSphString sCluster, IndexType_e eType )
: m_sName { std::move (sName) }
, m_sCluster { std::move (sCluster) }
, m_eType { eType }
{}
NamedIndexType_t ( CSphString sName, IndexType_e eType )
: m_sName { std::move (sName) }
, m_eType { eType }
{}
};
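// snapshot all known indexes (local plain/rt/percolate/template plus distributed)
// as name/cluster/type tuples, sorted case-insensitively by name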
CSphVector<NamedIndexType_t> GetAllServedIndexes()
{
CSphVector<NamedIndexType_t> dIndexes;
// collect local, rt, percolate
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto& tIt : *hLocal )
{
if ( !tIt.second )
continue;
switch ( tIt.second->m_eType )
{
case IndexType_e::PLAIN:
case IndexType_e::RT:
case IndexType_e::PERCOLATE:
case IndexType_e::TEMPLATE:
dIndexes.Add ( { tIt.first, tIt.second->m_sCluster, tIt.second->m_eType } );
break;
default:
dIndexes.Add ( { tIt.first, IndexType_e::ERROR_ } );
}
}
// collect distributed
assert ( g_pDistIndexes );
auto pDistSnapshot = g_pDistIndexes->GetHash();
for ( auto& tIt : *pDistSnapshot )
// no need to check the distr entry itself; iterating the snapshot guarantees the index exists.
dIndexes.Add ( { tIt.first, IndexType_e::DISTR } );
dIndexes.Sort ( Lesser ( [] ( const NamedIndexType_t& a, const NamedIndexType_t& b ) { return strcasecmp ( a.m_sName.cstr(), b.m_sName.cstr() ) < 0; } ) );
return dIndexes;
}
void HandleMysqlShowTables ( RowBuffer_i & tOut, const SqlStmt_t * pStmt )
{
auto dIndexes = GetAllServedIndexes();
bool bWithClusters = ClusterFlavour();
// output the results
VectorLike dTable ( pStmt->m_sStringParam, { "Table", "Type" } );
for ( auto& dPair : dIndexes )
{
if ( bWithClusters && !dPair.m_sCluster.IsEmpty ())
dTable.MatchTuplet ( SphSprintf ("%s:%s", dPair.m_sCluster.cstr(), dPair.m_sName.cstr()).cstr(), szIndexType ( dPair.m_eType ) );
else
dTable.MatchTuplet( dPair.m_sName.cstr (), szIndexType(dPair.m_eType) );
}
tOut.DataTable ( dTable );
}
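// generic sanity check over a list of attrs or fields: rejects reserved or internal
// names as well as duplicates; fnGetName adapts the element type to its name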
template <typename T, typename GETNAME>
static bool CheckAttrs ( const VecTraits_T<T> & dAttrs, GETNAME && fnGetName, CSphString & sError )
{
ARRAY_FOREACH ( i, dAttrs )
{
const CSphString & sName = fnGetName(dAttrs[i]);
if ( CSphSchema::IsReserved ( sName.cstr() ) || sphIsInternalAttr ( sName ) )
{
sError.SetSprintf ( "attribute name '%s' is a reserved keyword", sName.cstr() );
return false;
}
for ( int j = i+1; j < dAttrs.GetLength(); j++ )
if ( fnGetName(dAttrs[j])==sName )
{
sError.SetSprintf ( "duplicate attribute name '%s'", sName.cstr() );
return false;
}
}
return true;
}
static bool CheckExistingTables ( const SqlStmt_t & tStmt, CSphString & sError )
{
if ( g_pLocalIndexes->Contains ( tStmt.m_sIndex ) || g_pDistIndexes->Contains ( tStmt.m_sIndex ) )
{
if ( tStmt.m_tCreateTable.m_bIfNotExists )
return true;
else
{
sError.SetSprintf ( "table '%s' already exists", tStmt.m_sIndex.cstr() );
return false;
}
}
if ( CSphSchema::IsReserved ( tStmt.m_sIndex.cstr() ) )
{
sError.SetSprintf ( "'%s' is a reserved keyword", tStmt.m_sIndex.cstr() );
return false;
}
return true;
}
static bool CheckCreateTable ( const SqlStmt_t & tStmt, CSphString & sError )
{
if ( !CheckExistingTables ( tStmt, sError ) )
return false;
if ( !CheckAttrs ( tStmt.m_tCreateTable.m_dAttrs, []( const CreateTableAttr_t & tAttr ) { return tAttr.m_tAttr.m_sName; }, sError ) )
return false;
if ( !CheckAttrs ( tStmt.m_tCreateTable.m_dFields, []( const CSphColumnInfo & tAttr ) { return tAttr.m_sName; }, sError ) )
return false;
// cross-checks attrs and fields
for ( const auto & i : tStmt.m_tCreateTable.m_dAttrs )
for ( const auto & j : tStmt.m_tCreateTable.m_dFields )
if ( i.m_tAttr.m_sName==j.m_sName && i.m_tAttr.m_eAttrType!=SPH_ATTR_STRING )
{
sError.SetSprintf ( "duplicate attribute name '%s'", i.m_tAttr.m_sName.cstr() );
return false;
}
return true;
}
static Threads::Coro::Mutex_c g_tCreateTableMutex;
static void HandleMysqlCreateTable ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
CSphString sError;
if ( !sphCheckWeCanModify ( tOut ) )
return;
if ( !IsConfigless() )
{
sError = "CREATE TABLE requires data_dir to be set in the config file";
tOut.Error ( sError.cstr() );
return;
}
// only one CREATE TABLE at a time is allowed; concurrent CREATE TABLE IF NOT EXISTS calls are serialized here and pass cleanly
Threads::ScopedCoroMutex_t tCreateTableLock ( g_tCreateTableMutex );
if ( !CheckCreateTable ( tStmt, sError ) )
{
sError.SetSprintf ( "table '%s': CREATE TABLE failed: %s", tStmt.m_sIndex.cstr(), sError.cstr() );
tOut.Error ( sError.cstr() );
return;
}
StrVec_t dWarnings;
bool bCreatedOk = CreateNewIndexConfigless ( tStmt.m_sIndex, tStmt.m_tCreateTable, dWarnings, sError );
sWarning = ConcatWarnings(dWarnings);
if ( !bCreatedOk )
{
sError.SetSprintf ( "error adding table '%s': %s", tStmt.m_sIndex.cstr(), sError.cstr() );
tOut.Error ( sError.cstr() );
return;
}
tOut.Ok ( 0, dWarnings.GetLength() );
}
static const CSphSchema & GetSchemaForCreateTable ( const CSphIndex * pIndex )
{
assert ( pIndex );
assert ( pIndex->IsRT() || pIndex->IsPQ() );
return ((const RtIndex_i*)pIndex)->GetInternalSchema();
}
static CSphString BuildCreateTableRt ( const CSphString & sName, const CSphIndex * pIndex, const CSphSchema & tSchema )
{
assert(pIndex);
CSphString sCreateTable = BuildCreateTable ( sName, pIndex, tSchema );
return sCreateTable;
}
static void HandleMysqlCreateTableLike ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
CSphString sError;
if ( !IsConfigless() )
{
sError = "CREATE TABLE requires data_dir to be set in the config file";
tOut.Error ( sError.cstr() );
return;
}
if ( !CheckExistingTables ( tStmt, sError ) )
{
sError.SetSprintf ( "table '%s': CREATE TABLE failed: %s", tStmt.m_sIndex.cstr(), sError.cstr() );
tOut.Error ( sError.cstr() );
return;
}
const CSphString & sLike = tStmt.m_tCreateTable.m_sLike;
CSphString sCreateTable;
switch ( IndexIsServed ( sLike ) )
{
case RunIdx_e::NOTSERVED:
sError.SetSprintf ( "table '%s': CREATE TABLE LIKE failed: no table '%s' found", tStmt.m_sIndex.cstr(), sLike.cstr() );
tOut.Error ( sError.cstr() );
return;
case RunIdx_e::LOCAL:
{
auto pServed = GetServed ( sLike );
assert ( pServed );
if ( !ServedDesc_t::IsMutable ( pServed ) )
{
tOut.ErrorAbsent ( "table '%s' is not real-time or percolate", sError.cstr() );
return;
}
RIdx_c pIdx { pServed };
sCreateTable = BuildCreateTableRt ( tStmt.m_sIndex, pIdx, GetSchemaForCreateTable ( pIdx ) );
break;
}
case RunIdx_e::DISTR:
{
auto pDist = GetDistr ( sLike );
sCreateTable = BuildCreateTableDistr ( tStmt.m_sIndex, *pDist );
break;
}
default: break;
};
CSphVector<SqlStmt_t> dCreateTableStmts;
if ( !ParseDdl ( FromStr ( sCreateTable ), dCreateTableStmts, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
if ( dCreateTableStmts.GetLength()!=1 )
{
tOut.Error ( "CREATE TABLE LIKE failed" );
return;
}
SqlStmt_t & tNewCreateTable = dCreateTableStmts[0];
tNewCreateTable.m_tCreateTable.m_bIfNotExists = tStmt.m_tCreateTable.m_bIfNotExists;
HandleMysqlCreateTable ( tOut, tNewCreateTable, sWarning );
}
static void HandleMysqlDropTable ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
CSphString sError;
if ( !IsConfigless() )
{
sError = "DROP TABLE requires data_dir to be set in the config file";
tOut.Error ( sError.cstr() );
return;
}
bool bDropped = DropIndexInt ( tStmt.m_sIndex.cstr(), tStmt.m_bIfExists, sError, &sWarning );
sphLogDebug ( "dropped table %s, ok %d, error %s", tStmt.m_sIndex.cstr(), (int)bDropped, sError.scstr() ); // FIXME!!! remove
if ( !bDropped )
tOut.Error ( sError.cstr() );
else
tOut.Ok ( 0, ( sWarning.IsEmpty() ? 0 : 1 ) );
}
void HandleMysqlShowCreateTable ( RowBuffer_i & tOut, const SqlStmt_t & tStmt )
{
auto pServed = GetServed ( tStmt.m_sIndex );
auto pDist = GetDistr ( tStmt.m_sIndex );
if ( !pServed && !pDist )
{
tOut.ErrorAbsent ( "no such table '%s'", tStmt.m_sIndex.cstr () );
return;
}
if ( pServed && !ServedDesc_t::IsMutable ( pServed ) )
{
tOut.ErrorAbsent ( "table '%s' is not real-time or percolate", tStmt.m_sIndex.cstr () );
return;
}
// result set header packet
tOut.HeadTuplet ( "Table", "Create Table" );
CSphString sCreateTable;
if ( pServed )
{
RIdx_c pIdx { pServed };
sCreateTable = BuildCreateTableRt ( pIdx->GetName(), pIdx, GetSchemaForCreateTable ( pIdx ) );
} else
sCreateTable = BuildCreateTableDistr ( tStmt.m_sIndex, *pDist );
tOut.DataTuplet ( tStmt.m_sIndex.cstr(), sCreateTable.cstr() );
tOut.Eof();
}
// MySQL Workbench (and maybe other clients) crashes without it
void HandleMysqlShowDatabases ( RowBuffer_i & tOut, SqlStmt_t & )
{
tOut.HeadBegin ();
tOut.HeadColumn ( "Databases" );
tOut.HeadEnd();
tOut.PutString ( g_sDbName );
tOut.Commit ();
tOut.Eof();
}
void HandleMysqlShowPlugins ( RowBuffer_i & tOut, SqlStmt_t & )
{
CSphVector<PluginInfo_t> dPlugins;
sphPluginList ( dPlugins );
tOut.HeadBegin ();
tOut.HeadColumn ( "Type" );
tOut.HeadColumn ( "Name" );
tOut.HeadColumn ( "Library" );
tOut.HeadColumn ( "Users" );
tOut.HeadColumn ( "Extra" );
tOut.HeadEnd();
ARRAY_FOREACH ( i, dPlugins )
{
const PluginInfo_t & p = dPlugins[i];
tOut.PutString ( g_dPluginTypes[p.m_eType] );
tOut.PutString ( p.m_sName );
tOut.PutString ( p.m_sLib );
tOut.PutNumAsString ( p.m_iUsers );
tOut.PutString ( p.m_sExtra );
if ( !tOut.Commit() )
return;
}
tOut.Eof();
}
enum ThreadInfoFormat_e
{
THD_FORMAT_NATIVE,
THD_FORMAT_SPHINXQL
};
static Str_t FormatInfo ( const PublicThreadDesc_t & tThd, ThreadInfoFormat_e eFmt, QuotationEscapedBuilder & tBuf )
{
if ( tThd.m_pQuery && eFmt==THD_FORMAT_SPHINXQL && tThd.m_eProto!=Proto_e::MYSQL41 )
{
bool bGotQuery = false;
if ( tThd.m_pQuery )
{
tBuf.Clear();
FormatSphinxql ( *tThd.m_pQuery, 0, tBuf );
bGotQuery = true;
}
// the query might have been removed before we got here; fall through to the common path
if ( bGotQuery )
return (Str_t)tBuf;
}
if ( tThd.m_sDescription.IsEmpty () && tThd.m_szCommand )
return FromSz ( tThd.m_szCommand );
else
return (Str_t)tThd.m_sDescription;
}
void HandleMysqlShowThreads ( RowBuffer_i & tOut, const SqlStmt_t * pStmt )
{
ThreadInfoFormat_e eFmt = THD_FORMAT_NATIVE;
bool bAll = false;
int iCols = -1;
if ( pStmt )
{
if ( pStmt->m_sThreadFormat == "sphinxql" )
eFmt = THD_FORMAT_SPHINXQL;
else if ( pStmt->m_sThreadFormat == "all" )
bAll = true;
iCols = pStmt->m_iThreadsCols;
}
tOut.HeadBegin ();
tOut.HeadColumn ( "TID", MYSQL_COL_LONG );
tOut.HeadColumn ( "Name" );
tOut.HeadColumn ( "Proto" );
tOut.HeadColumn ( "State" );
tOut.HeadColumn ( "Connection from" );
tOut.HeadColumn ( "ConnID", MYSQL_COL_LONGLONG );
// tOut.HeadColumn ( "Time", MYSQL_COL_FLOAT );
tOut.HeadColumn ( "This/prev job time" );
if ( g_bCpuStats )
{
// tOut.HeadColumn ( "Work time CPU" );
tOut.HeadColumn ( "CPU activity", MYSQL_COL_FLOAT);
}
tOut.HeadColumn ( "Jobs done", MYSQL_COL_LONG );
tOut.HeadColumn ( "Thread status" );
if ( bAll )
tOut.HeadColumn ( "Chain" );
tOut.HeadColumn ( "Info" );
if (!tOut.HeadEnd())
return;
QuotationEscapedBuilder tBuf;
// sphLogDebug ( "^^ Show threads. Current info is %p", GetTaskInfo () );
CSphSwapVector<PublicThreadDesc_t> dFinal;
Threads::IterateActive([&dFinal, iCols] ( Threads::LowThreadDesc_t * pThread ){
if ( pThread )
dFinal.Add ( GatherPublicThreadInfo ( pThread, iCols ) );
});
for ( const auto & dThd : dFinal )
{
if ( !bAll && dThd.m_eTaskState==TaskState_e::UNKNOWN )
continue;
tOut.PutNumAsString ( dThd.m_iThreadID ); // TID
tOut.PutString ( dThd.m_sThreadName ); // Name
tOut.PutString ( dThd.m_sProto ); // Proto
tOut.PutString ( TaskStateName ( dThd.m_eTaskState ) ); // State
tOut.PutString ( dThd.m_sClientName ); // Connection from
tOut.PutNumAsString ( dThd.m_iConnID ); // ConnID
int64_t tmNow = sphMicroTimer (); // short-term cache
// tOut.PutMicrosec ( tmNow-dThd.m_tmStart.value_or(tmNow) ); // time
// tOut.PutTimeAsString ( dThd.m_tmTotalWorkedTimeUS ); // work time
// This/prev job time
if ( dThd.m_tmLastJobStartTimeUS < 0 )
tOut.PutString ( "-" ); // last job take
else if ( dThd.m_tmLastJobDoneTimeUS < 0 )
tOut.PutTimeAsString ( tmNow - dThd.m_tmLastJobStartTimeUS );
else
tOut.PutTimeAsString ( dThd.m_tmLastJobDoneTimeUS - dThd.m_tmLastJobStartTimeUS, " (prev)" );
if ( g_bCpuStats )
{
// tOut.PutTimeAsString ( dThd.m_tmTotalWorkedCPUTimeUS ); // work CPU time
tOut.PutPercentAsString ( dThd.m_tmTotalWorkedCPUTimeUS, dThd.m_tmTotalWorkedTimeUS ); // CPU activity
}
tOut.PutNumAsString ( dThd.m_iTotalJobsDone ); // jobs done
if ( dThd.m_tmLastJobStartTimeUS<0 )
{
tOut.PutString ( "idling" ); // idle for
} else if ( dThd.m_tmLastJobDoneTimeUS<0 )
{
tOut.PutString ( "working" ); // idle for
} else
{
tOut.PutString ( "idling" ); // notice, just 'idling' instead of 'idling for N seconds'. So, value of dThd.m_tmLastJobDoneTimeUS is never more displayed.
}
if ( bAll )
tOut.PutString ( dThd.m_sChain ); // Chain
auto sInfo = FormatInfo ( dThd, eFmt, tBuf );
if ( iCols >= 0 && iCols < sInfo.second )
sInfo.second = iCols;
tOut.PutString ( sInfo ); // Info m_pTaskInfo
if ( !tOut.Commit () )
break;
}
tOut.Eof();
}
// helper; available only via 'select ... from @@system.sessions...'
void HandleShowSessions ( RowBuffer_i& tOut, const SqlStmt_t* pStmt )
{
ThreadInfoFormat_e eFmt = THD_FORMAT_NATIVE;
bool bAll = false;
int iCols = -1;
if ( pStmt )
{
if ( pStmt->m_sThreadFormat == "sphinxql" )
eFmt = THD_FORMAT_SPHINXQL;
else if ( pStmt->m_sThreadFormat == "all" )
bAll = true;
iCols = pStmt->m_iThreadsCols;
}
tOut.HeadBegin ();
tOut.HeadColumn ( "Proto" );
tOut.HeadColumn ( "State" );
tOut.HeadColumn ( "Host" );
tOut.HeadColumn ( "ConnID", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "Killed" );
if ( bAll )
tOut.HeadColumn ( "Chain" );
tOut.HeadColumn ( "Last cmd time" );
tOut.HeadColumn ( "Last cmd" );
if ( !tOut.HeadEnd() )
return;
QuotationEscapedBuilder tBuf;
// sphLogDebug ( "^^ Show threads. Current info is %p", GetTaskInfo () );
CSphSwapVector<PublicThreadDesc_t> dFinal;
IterateTasks ( [&dFinal, iCols] ( ClientTaskInfo_t* pTask ) {
if ( pTask )
{
PublicThreadDesc_t& tDesc = dFinal.Add();
tDesc.m_iDescriptionLimit = iCols;
GatherPublicTaskInfo ( tDesc, pTask );
}
} );
for ( const auto& dThd : dFinal )
{
if ( !bAll && dThd.m_eTaskState == TaskState_e::UNKNOWN )
continue;
tOut.PutString ( dThd.m_sProto );
tOut.PutString ( TaskStateName ( dThd.m_eTaskState ) );
tOut.PutString ( dThd.m_sClientName ); // Host
tOut.PutNumAsString ( dThd.m_iConnID ); // ConnID
tOut.PutNumAsString ( dThd.m_bKilled ? 1 : 0);
if ( bAll )
tOut.PutString ( dThd.m_sChain ); // Chain
if ( dThd.m_tmLastJobDoneTimeUS==-1 ) // not yet finished
tOut.PutTimestampAsString ( dThd.m_tmLastJobStartTimeUS );
else
tOut.PutTimeAsString ( dThd.m_tmLastJobDoneTimeUS - dThd.m_tmLastJobStartTimeUS );
auto sInfo = FormatInfo ( dThd, eFmt, tBuf );
if ( iCols >= 0 && iCols < sInfo.second )
sInfo.second = iCols;
tOut.PutString ( sInfo ); // Info m_pTaskInfo
if ( !tOut.Commit() )
break;
}
tOut.Eof();
}
void HandleMysqlFlushHostnames ( RowBuffer_i & tOut )
{
SmallStringHash_T<DWORD> hHosts;
// Collect all urls from all distr indexes
assert ( g_pDistIndexes );
auto pDistSnapshot = g_pDistIndexes->GetHash();
for ( auto& tIt : *pDistSnapshot )
tIt.second->ForEveryHost ( [&] ( AgentDesc_t& tDesc ) {
if ( tDesc.m_bNeedResolve )
hHosts.Add ( tDesc.m_uAddr, tDesc.m_sAddr );
});
for ( auto & tHost : hHosts ) // by reference, so the renewed address is actually stored in the hash
{
DWORD uRenew = sphGetAddress ( tHost.first.cstr() );
if ( uRenew )
tHost.second = uRenew;
}
// copy the renewed hosts back to the distributed agents.
// the case when the distr index list changed between collecting the urls and applying them
// is safe, since we iterate over the list again and apply
// only hosts that still exist.
for ( auto& tIt : *pDistSnapshot )
tIt.second->ForEveryHost ( [&] ( AgentDesc_t& tDesc ) {
if ( tDesc.m_bNeedResolve )
{
DWORD * pRenew = hHosts ( tDesc.m_sAddr );
if ( pRenew && *pRenew )
tDesc.m_uAddr = *pRenew;
}
});
tOut.Ok ( hHosts.GetLength() );
}
void HandleMysqlFlushLogs ( RowBuffer_i & tOut )
{
sigusr1(1);
tOut.Ok ();
}
void HandleMysqlReloadIndexes ( RowBuffer_i & tOut )
{
g_bReloadForced = true;
sighup(1);
tOut.Ok ();
}
/////////////////////////////////////////////////////////////////////////////
// user variables these send from master to agents
/////////////////////////////////////////////////////////////////////////////
class UVarRequestBuilder_c : public RequestBuilder_i
{
public:
UVarRequestBuilder_c ( const char * sName, const CSphVector<SphAttr_t> &dSetValues )
: m_sName ( sName )
{
m_iUserVars = dSetValues.GetLength ();
m_dBuf.Reset ( m_iUserVars * sizeof ( dSetValues[0] ) + 129 );
// 129 above is the safe gap for VLB delta-encoding 64-bit ints.
// If the 1st value is 0x8000`0000`0000`0000 - it will occupy 10 bytes VLB,
// then up to 127 values 0x0100.. - each will occupy 9 bytes VLB,
// deltas 0x00XX.. take 8 bytes or less. So, a 2+127 bytes gap is enough to cover the worst possible case
// (since 0x80.. + 127 * 0x01.. produces a 0xFF.. num, any other delta >0x01.. is impossible, since
// it would cause overflow, and deltas <0x01.. occupy <=8 bytes each).
SphAttr_t iLast = 0;
BYTE * pCur = m_dBuf.Begin ();
for ( const auto &dValue : dSetValues )
{
pCur += ZipToPtrLE ( pCur, dValue - iLast );
iLast = dValue;
}
m_iLength = pCur-m_dBuf.Begin();
}
void BuildRequest ( const AgentConn_t &, ISphOutputBuffer & tOut ) const final
{
// API header
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_UVAR, VER_COMMAND_UVAR );
tOut.SendString ( m_sName.cstr() );
tOut.SendInt ( m_iUserVars );
tOut.SendArray ( m_dBuf, m_iLength );
}
CSphString m_sName;
CSphFixedVector<BYTE> m_dBuf { 0 };
int m_iUserVars = 0;
int m_iLength = 0;
};
class UVarReplyParser_c : public ReplyParser_i
{
public:
bool ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const final
{
// error got handled at call site
bool bOk = ( tReq.GetByte()==1 );
return bOk;
}
};
static bool SendUserVar ( const char * sIndex, const char * sUserVarName, CSphVector<SphAttr_t> & dSetValues, CSphString & sError )
{
auto pIndex = GetDistr ( sIndex );
if ( !pIndex )
{
sError.SetSprintf ( "unknown table '%s' in Set statement", sIndex );
return false;
}
VecRefPtrsAgentConn_t dAgents;
pIndex->GetAllHosts ( dAgents );
bool bGotLocal = !pIndex->m_dLocal.IsEmpty();
// FIXME!!! warn on missed agents
if ( dAgents.IsEmpty() && !bGotLocal )
return true;
dSetValues.Uniq();
// FIXME!!! warn on empty agents
// connect to remote agents and query them
if ( !dAgents.IsEmpty() )
{
UVarRequestBuilder_c tReqBuilder ( sUserVarName, dSetValues );
UVarReplyParser_c tParser;
PerformRemoteTasks ( dAgents, &tReqBuilder, &tParser );
}
// must come last, since SetLocalUserVar swaps the contents of dSetValues
if ( bGotLocal )
SetLocalUserVar ( sUserVarName, dSetValues );
return true;
}
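// agent-side receiver for SEARCHD_COMMAND_UVAR: decodes the delta-compressed
// (VLB-zipped) value list back into absolute values and stores the user variable locally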
void HandleCommandUserVar ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_UVAR, tOut ) )
return;
CSphString sUserVar = tReq.GetString();
int iCount = tReq.GetInt();
CSphVector<SphAttr_t> dUserVar ( iCount );
int iLength = tReq.GetInt();
CSphFixedVector<BYTE> dBuf ( iLength );
tReq.GetBytes ( dBuf.Begin(), iLength );
if ( tReq.GetError() )
{
SendErrorReply ( tOut, "invalid or truncated request" );
return;
}
SphAttr_t iLast = 0;
const BYTE * pCur = dBuf.Begin();
ARRAY_FOREACH ( i, dUserVar )
{
auto iDelta = UnzipValueLE<int64_t> ( [&pCur]() mutable { return *pCur++; } );
assert ( iDelta>0 );
iLast += iDelta;
dUserVar[i] = iLast;
}
SetLocalUserVar ( sUserVar, dUserVar );
auto tReply = APIAnswer ( tOut, VER_COMMAND_UVAR );
tOut.SendInt ( 1 );
}
/////////////////////////////////////////////////////////////////////////////
// SMART UPDATES HANDLER
/////////////////////////////////////////////////////////////////////////////
SphinxqlReplyParser_c::SphinxqlReplyParser_c ( int * pUpd, int * pWarns )
: m_pUpdated ( pUpd )
, m_pWarns ( pWarns )
{}
// fixme! reuse code from sphinxql, leave only refs here
bool SphinxqlReplyParser_c::ParseReply ( MemInputBuffer_c & tReq, AgentConn_t & ) const
{
DWORD uSize = ( tReq.GetLSBDword() & 0x00ffffff ) - 1;
BYTE uCommand = tReq.GetByte();
if ( uCommand==0 ) // ok packet
{
*m_pUpdated += MysqlUnpack ( tReq, &uSize );
MysqlUnpack ( tReq, &uSize ); ///< int Insert_id (don't used).
auto uWarnStatus = tReq.GetLSBDword ();
*m_pWarns += ( uWarnStatus >> 16 ) & 0xFFFF; ///< num of warnings
uSize -= 4;
if ( uSize )
tReq.GetRawString ( uSize );
return true;
}
if ( uCommand==0xff ) // error packet
{
tReq.GetByte();
tReq.GetByte(); ///< num of errors (2 bytes), we don't use it for now.
uSize -= 2;
if ( uSize )
tReq.GetRawString ( uSize );
}
return false;
}
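// split the original query text around its table list, so that the per-agent request
// can be rebuilt as <begin> + agent's own index list + <end>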
SphinxqlRequestBuilder_c::SphinxqlRequestBuilder_c ( Str_t sQuery, const SqlStmt_t & tStmt )
: m_sBegin { sQuery.first, tStmt.m_iListStart }
, m_sEnd ( sQuery.first + tStmt.m_iListEnd, sQuery.second - tStmt.m_iListEnd )
{
}
void SphinxqlRequestBuilder_c::BuildRequest ( const AgentConn_t & tAgent, ISphOutputBuffer & tOut ) const
{
const char* sIndexes = tAgent.m_tDesc.m_sIndexes.cstr();
// API header
auto tHdr = APIHeader ( tOut, SEARCHD_COMMAND_SPHINXQL, VER_COMMAND_SPHINXQL );
APIBlob_c dWrapper ( tOut ); // sphinxql is wrapped twice, so one more length needs to be written.
tOut.SendBytes ( m_sBegin );
tOut.SendBytes ( sIndexes );
tOut.SendBytes ( m_sEnd );
}
//////////////////////////////////////////////////////////////////////////
static void DoExtendedUpdate ( const SqlStmt_t & tStmt, const CSphString & sIndex, const char * szDistributed, bool bBlobUpdate, int & iSuccesses, int & iUpdated, SearchFailuresLog_c & dFails, CSphString & sWarning, const cServedIndexRefPtr_c & pServed )
{
TRACE_CORO ( "rt", "DoExtendedUpdate" );
TlsMsg::ResetErr();
// checks
if ( !pServed )
{
dFails.Submit ( sIndex, szDistributed, "table not available" );
return;
}
if ( !ValidateClusterStatement ( sIndex, *pServed, tStmt.m_sCluster, IsHttpStmt ( tStmt ) ) )
{
dFails.Submit ( sIndex, szDistributed, TlsMsg::szError() );
return;
}
RtAccum_t tAcc;
ReplicationCommand_t * pCmd = tAcc.AddCommand ( tStmt.m_bJson ? ReplCmd_e::UPDATE_JSON : ReplCmd_e::UPDATE_QL, sIndex, tStmt.m_sCluster );
assert ( pCmd );
pCmd->m_pUpdateAPI = tStmt.AttrUpdatePtr();
pCmd->m_bBlobUpdate = bBlobUpdate;
pCmd->m_pUpdateCond = &tStmt.m_tQuery;
HandleCmdReplicateUpdate ( tAcc, sWarning, iUpdated );
if ( TlsMsg::HasErr() )
{
dFails.Submit ( sIndex, szDistributed, TlsMsg::szError() );
return;
}
iSuccesses++;
}
bool HandleUpdateAPI ( AttrUpdateArgs& tArgs, CSphIndex* pIndex, int& iUpdate )
{
bool bCritical = false;
iUpdate = pIndex->UpdateAttributes ( tArgs.m_pUpdate, bCritical, *tArgs.m_pError, *tArgs.m_pWarning );
return !bCritical;
}
namespace {
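// prepare an update run: the collector gathers docids matching the query's WHERE
// condition, while the shared update descriptor is marked single-use and inherits
// the query's ignore-nonexistent/strict flags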
DocsCollector_c InitUpdate( AttrUpdateArgs& tArgs, const cServedIndexRefPtr_c& pDesc )
{
DocsCollector_c tCollector ( *tArgs.m_pQuery, tArgs.m_bJson, *tArgs.m_pIndexName, pDesc, tArgs.m_pError );
AttrUpdateSharedPtr_t& pUpdate = tArgs.m_pUpdate;
pUpdate->m_bReusable = false;
pUpdate->m_bIgnoreNonexistent = tArgs.m_pQuery->m_bIgnoreNonexistent;
pUpdate->m_bStrict = tArgs.m_pQuery->m_bStrict;
return tCollector;
}
static void DoUpdate( DocsCollector_c& tCollector, AttrUpdateArgs& tArgs, CSphIndex * pIndex )
{
TRACE_CORO ( "rt", "DoUpdate" );
AttrUpdateSharedPtr_t& pUpdate = tArgs.m_pUpdate;
while ( tCollector.GetValuesChunk ( pUpdate->m_dDocids, tArgs.m_pQuery->m_iMaxMatches ) )
{
int iChanged = 0;
Verify ( HandleUpdateAPI ( tArgs, pIndex, iChanged ) ); // fixme! handle this
tArgs.m_iAffected += iChanged;
}
}
void UpdateWlocked ( AttrUpdateArgs& tArgs, const cServedIndexRefPtr_c& pDesc, int& iUpdated )
{
TRACE_CORO ( "sph", "UpdateWlocked" );
// short-living r-lock m.b. acquired and released by collector when running query
auto tCollector = InitUpdate ( tArgs, pDesc );
BEGIN_CORO ( "wait", "take w-lock" );
WIdx_c wLocked { pDesc }; // exclusive lock for process of update. Note, between collecting and updating m.b. race! To eliminate it, need to trace index generation and recollect if it changed.
END_CORO ( "wait" );
DoUpdate ( tCollector, tArgs, wLocked );
}
void UpdateRlocked ( AttrUpdateArgs& tArgs, const cServedIndexRefPtr_c& pDesc, int& iUpdated)
{
TRACE_CORO ( "sph", "UpdateRlocked" );
	// wide r-lock over the whole update. r-locks acquired by the collector must be re-entrant.
BEGIN_CORO ( "wait", "take r-lock" );
RWIdx_c rLocked { pDesc };
END_CORO ( "wait" );
auto tCollector = InitUpdate ( tArgs, pDesc );
DoUpdate ( tCollector, tArgs, rLocked );
}
} // unnamed namespace
void HandleMySqlExtendedUpdate ( AttrUpdateArgs& tArgs, const cServedIndexRefPtr_c& pDesc, int& iUpdated, bool bNeedWlock )
{
TRACE_CORO ( "sph", "HandleMySqlExtendedUpdate" );
return bNeedWlock ? UpdateWlocked ( tArgs, pDesc, iUpdated ) : UpdateRlocked ( tArgs, pDesc, iUpdated );
}
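// entry point for SphinxQL UPDATE: resolves local and distributed tables,
// forbids updating the 'id' attribute, runs local updates and forwards the
// statement to remote agents, accumulating successes and failures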
void sphHandleMysqlUpdate ( StmtErrorReporter_i & tOut, const SqlStmt_t & tStmt, Str_t sQuery )
{
TRACE_CORO ( "sph", "sphHandleMysqlUpdate" );
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto* pSession = session::GetClientSession();
pSession->FreezeLastMeta();
auto& sWarning = pSession->m_tLastMeta.m_sWarning;
StatCountCommand ( SEARCHD_COMMAND_UPDATE );
int64_t tmStart = sphMicroTimer();
// extract index names
StrVec_t dIndexNames;
ParseIndexList ( tStmt.m_sIndex, dIndexNames );
if ( dIndexNames.IsEmpty() )
{
tOut.Error ( "no such table '%s'", tStmt.m_sIndex.cstr() );
return;
}
DistrPtrs_t dDistributed;
// copy distributed indexes description
CSphString sMissed;
if ( !ExtractDistributedIndexes ( dIndexNames, dDistributed, sMissed ) )
{
tOut.Error ( "unknown table '%s' in update request", sMissed.cstr() );
return;
}
// do update
SearchFailuresLog_c dFails;
int iSuccesses = 0;
int iUpdated = 0;
int iWarns = 0;
bool bBlobUpdate = false;
for ( const auto & i : tStmt.AttrUpdate().m_dAttributes )
{
if ( i.m_sName==sphGetDocidName() )
{
tOut.Error ( "'id' attribute cannot be updated" );
return;
}
bBlobUpdate |= sphIsBlobAttr ( i.m_eType );
}
ARRAY_FOREACH ( iIdx, dIndexNames )
{
const char * sReqIndex = dIndexNames[iIdx].cstr();
auto pLocked = GetServed ( sReqIndex );
if ( pLocked )
{
DoExtendedUpdate ( tStmt, sReqIndex, nullptr, bBlobUpdate, iSuccesses, iUpdated, dFails, sWarning, pLocked );
} else if ( dDistributed[iIdx] )
{
assert ( !dDistributed[iIdx]->IsEmpty() );
const StrVec_t & dLocal = dDistributed[iIdx]->m_dLocal;
ARRAY_FOREACH ( i, dLocal )
{
const char * sLocal = dLocal[i].cstr();
auto pServed = GetServed ( sLocal );
DoExtendedUpdate ( tStmt, sLocal, sReqIndex, bBlobUpdate, iSuccesses, iUpdated, dFails, sWarning, pServed );
}
}
// update remote agents
if ( dDistributed[iIdx] && !dDistributed[iIdx]->m_dAgents.IsEmpty() )
{
const DistributedIndex_t * pDist = dDistributed[iIdx];
VecRefPtrs_t<AgentConn_t *> dAgents;
pDist->GetAllHosts ( dAgents );
// connect to remote agents and query them
std::unique_ptr<RequestBuilder_i> pRequestBuilder = CreateRequestBuilder ( sQuery, tStmt );
std::unique_ptr<ReplyParser_i> pReplyParser = CreateReplyParser ( tStmt.m_bJson, iUpdated, iWarns );
iSuccesses += PerformRemoteTasks ( dAgents, pRequestBuilder.get (), pReplyParser.get () );
}
}
StringBuilder_c sReport;
dFails.BuildReport ( sReport );
StatCountCommandDetails ( SearchdStats_t::eUpdate, iUpdated, tmStart );
if ( !iSuccesses )
{
tOut.Error ( "%s", sReport.cstr() );
return;
} else
{
int64_t tmRealTime = sphMicroTimer() - tmStart;
LogStatementSphinxql ( sQuery, (int)( tmRealTime / 1000 ) );
}
tOut.Ok ( iUpdated, iWarns );
}
bool HandleMysqlSelect ( RowBuffer_i & dRows, SearchHandler_c & tHandler )
{
	// let's check all queries for errors
StringBuilder_c sError { "; " };
CSphVector<int64_t> dAgentTimes; // dummy for error reporting
ARRAY_FOREACH ( i, tHandler.m_dQueries )
{
CheckQuery ( tHandler.m_dQueries[i], tHandler.m_dAggrResults[i].m_sError, tHandler.m_dQueries.GetLength() == 1 );
if ( !tHandler.m_dAggrResults[i].m_sError.IsEmpty() )
{
LogQuery ( tHandler.m_dQueries[i], tHandler.m_dAggrResults[i], dAgentTimes );
if ( tHandler.m_dQueries.GetLength()==1 )
sError << tHandler.m_dAggrResults[0].m_sError;
else
sError.Sprintf( "query %d error: %s", i, tHandler.m_dAggrResults[i].m_sError.cstr() );
}
}
if ( !sError.IsEmpty() )
{
// stmt is intentionally NULL, as we did all the reporting just above
dRows.Error ( sError.cstr() );
return false;
}
// actual searching
tHandler.RunQueries();
if ( sphInterrupted() )
{
sphLogDebug ( "HandleClientMySQL: got SIGTERM, sending the packet MYSQL_ERR_SERVER_SHUTDOWN" );
dRows.Error ( "Server shutdown in progress", EMYSQL_ERR::SERVER_SHUTDOWN );
return false;
}
return true;
}
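// extract a single bit from a packed DWORD mask
// (used for the per-field exact_hit/exact_order masks in the factors blob)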
inline static int Bit ( int iBit, const unsigned int * pData )
{
return ( ( pData[iBit / 32] & ( 1 << ( iBit % 32 ) ) ) ? 1 : 0 );
}
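// render the ranker factors blob as plain text or JSON:
// document-level factors first, then per-field and per-term blocks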
void sphFormatFactors ( StringBuilder_c & sOut, const unsigned int * pFactors, bool bJson )
{
sOut.GrowEnough ( 512 );
// format lines for header, fields, words
const char * sBmFmt = nullptr;
const char * sFieldFmt = nullptr;
const char * sWordFmt = nullptr;
ScopedComma_c sDelim;
if ( bJson )
{
sBmFmt = R"("bm25":%d, "bm25a":%f, "field_mask":%u, "doc_word_count":%d)";
sFieldFmt = R"({"field":%d, "lcs":%u, "hit_count":%u, "word_count":%u, "tf_idf":%f, "min_idf":%f, )"
R"("max_idf":%f, "sum_idf":%f, "min_hit_pos":%d, "min_best_span_pos":%d, "exact_hit":%u, )"
R"("max_window_hits":%d, "min_gaps":%d, "exact_order":%u, "lccs":%d, "wlccs":%f, "atc":%f})";
sWordFmt = R"(%i{"tf":%d, "idf":%f})";
sDelim.Init ( sOut, ", ", "{", "}" );
} else
{
sBmFmt = "bm25=%d, bm25a=%f, field_mask=%u, doc_word_count=%d";
sFieldFmt = "field%d=(lcs=%u, hit_count=%u, word_count=%u, tf_idf=%f, min_idf=%f, max_idf=%f, sum_idf=%f, "
"min_hit_pos=%d, min_best_span_pos=%d, exact_hit=%u, max_window_hits=%d, "
"min_gaps=%d, exact_order=%u, lccs=%d, wlccs=%f, atc=%f)";
sWordFmt = "word%d=(tf=%d, idf=%f)";
sDelim.Init ( sOut, ", " );
}
#define DI( _factor ) sphinx_get_doc_factor_int ( pFactors, SPH_DOCF_##_factor )
#define DF( _factor ) sphinx_get_doc_factor_float ( pFactors, SPH_DOCF_##_factor )
sOut.Sprintf ( sBmFmt, DI( BM25 ), DF( BM25A ), DI( MATCHED_FIELDS ), DI( DOC_WORD_COUNT ) );
{ ScopedComma_c sFields;
if ( bJson )
sFields.Init ( sOut, ", ", R"("fields":[)", "]");
auto pExactHit = sphinx_get_doc_factor_ptr ( pFactors, SPH_DOCF_EXACT_HIT_MASK );
auto pExactOrder = sphinx_get_doc_factor_ptr ( pFactors, SPH_DOCF_EXACT_ORDER_MASK );
int iFields = DI ( NUM_FIELDS );
for ( int i = 0; i<iFields; ++i )
{
#define FI( _factor ) sphinx_get_field_factor_int ( pField, SPH_FIELDF_##_factor )
#define FF( _factor ) sphinx_get_field_factor_float ( pField, SPH_FIELDF_##_factor )
auto pField = sphinx_get_field_factors ( pFactors, i );
if ( !FI (HIT_COUNT) )
continue;
sOut.Sprintf ( sFieldFmt, i, FI (LCS), FI (HIT_COUNT), FI (WORD_COUNT), FF (TF_IDF), FF (MIN_IDF),
FF (MAX_IDF), FF (SUM_IDF), FI (MIN_HIT_POS), FI (MIN_BEST_SPAN_POS), Bit (i, pExactHit),
FI (MAX_WINDOW_HITS), FI (MIN_GAPS), Bit (i, pExactOrder), FI (LCCS), FF (WLCCS), FF (ATC) );
#undef FF
#undef FI
}
} // fields block
{ ScopedComma_c sWords;
if ( bJson )
sWords.Init ( sOut, ", ", R"("words":[)", "]" );
auto iUniqQpos = DI ( MAX_UNIQ_QPOS );
for ( int i = 0; i<iUniqQpos; ++i )
{
auto pTerm = sphinx_get_term_factors ( pFactors, i + 1 );
if ( !sphinx_get_term_factor_int ( pTerm, SPH_TERMF_KEYWORD_MASK ) )
continue;
sOut.Sprintf ( sWordFmt, i, sphinx_get_term_factor_int ( pTerm, SPH_TERMF_TF ),
sphinx_get_term_factor_float ( pTerm, SPH_TERMF_IDF ) );
}
} // words block
#undef DF
#undef DI
}
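// emit a synthetic row for an empty grouped resultset: count-like columns get
// a plain 0, everything else is evaluated as a constant expression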
static void ReturnZeroCount ( const CSphSchema & tSchema, const CSphBitvec & tAttrsToSend, const StrVec_t & dCounts, RowBuffer_i & dRows )
{
for ( int i=0; i<tSchema.GetAttrsCount(); ++i )
{
if ( !tAttrsToSend.BitGet(i) )
continue;
const CSphColumnInfo & tCol = tSchema.GetAttr ( i );
// @count or its alias or count(distinct attr_name)
if ( dCounts.Contains ( tCol.m_sName ) )
{
dRows.PutNumAsString ( 0 );
} else
{
// essentially the same as SELECT_DUAL, parse and print constant expressions
ESphAttr eAttrType;
CSphString sError;
ExprParseArgs_t tExprArgs;
tExprArgs.m_pAttrType = &eAttrType;
ISphExprRefPtr_c pExpr { sphExprParse ( tCol.m_sName.cstr(), tSchema, nullptr, sError, tExprArgs )};
if ( !pExpr || !pExpr->IsConst() )
eAttrType = SPH_ATTR_NONE;
CSphMatch tMatch;
const BYTE * pStr = nullptr;
switch ( eAttrType )
{
case SPH_ATTR_STRINGPTR:
pExpr->StringEval ( tMatch, &pStr );
dRows.PutString ( (const char *)pStr );
SafeDelete ( pStr );
break;
case SPH_ATTR_INTEGER: dRows.PutNumAsString ( pExpr->IntEval ( tMatch ) ); break;
case SPH_ATTR_BIGINT: dRows.PutNumAsString ( pExpr->Int64Eval ( tMatch ) ); break;
case SPH_ATTR_FLOAT: dRows.PutFloatAsString ( pExpr->Eval ( tMatch ) ); break;
default:
dRows.PutNULL();
break;
}
}
}
dRows.Commit();
}
CSphString BuildMetaOneline ( const CSphQueryResultMeta & tMeta )
{
// --- 0 out of 1115 results in 115ms ---
// --- 20 out of >= 20 results in 5.123s ---
StringBuilder_c sMeta;
	// since we have microsecond precision, printing 0 would output '0us', which is not necessarily true.
if ( tMeta.m_iQueryTime > 0 )
sMeta.Sprintf ( "--- %d out of %s%l results in %.3t ---", tMeta.m_iMatches, ( tMeta.m_bTotalMatchesApprox ? ">=" : "" ), tMeta.m_iTotalMatches, (int64_t)tMeta.m_iQueryTime * 1000 );
else
sMeta.Sprintf ( "--- %d out of %s%l results in 0ms ---", tMeta.m_iMatches, ( tMeta.m_bTotalMatchesApprox ? ">=" : "" ), tMeta.m_iTotalMatches );
return (CSphString)sMeta;
}
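// check whether attribute #iAttr is NULL in this match; the null mask is
// either a packed blob bitvector or a plain 64-bit mask stored inline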
static bool IsNullSet ( const CSphMatch & tMatch, int iAttr, SphAttr_t tNullMask, const CSphColumnInfo * pNullBitmaskAttr )
{
if ( !pNullBitmaskAttr )
return false;
if ( pNullBitmaskAttr->m_eAttrType==SPH_ATTR_STRINGPTR )
{
ByteBlob_t tBlob = sphUnpackPtrAttr ( (const BYTE*)tNullMask );
BitVec_T<const BYTE> tVec ( tBlob.first, tBlob.second*8 );
return tVec.BitGet(iAttr);
}
assert ( iAttr < 64 );
	return !!( tNullMask & ( 1ULL << iAttr ) ); // 64-bit shift: iAttr may exceed 31
}
static void SendMysqlMatch ( const CSphMatch & tMatch, const CSphBitvec & tAttrsToSend, const ISphSchema & tSchema, RowBuffer_i & dRows, const CSphColumnInfo * pNullBitmaskAttr )
{
SphAttr_t tNullMask = pNullBitmaskAttr ? tMatch.GetAttr ( pNullBitmaskAttr->m_tLocator ) : 0;
for ( int i=0; i < tSchema.GetAttrsCount(); i++ )
{
if ( !tAttrsToSend.BitGet(i) )
continue;
if ( IsNullSet ( tMatch, i, tNullMask, pNullBitmaskAttr ) )
{
dRows.PutString("NULL");
continue;
}
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
const CSphAttrLocator & tLoc = tAttr.m_tLocator;
ESphAttr eAttrType = tAttr.m_eAttrType;
assert ( sphPlainAttrToPtrAttr(eAttrType)==eAttrType );
switch ( eAttrType )
{
case SPH_ATTR_INTEGER:
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_BOOL:
case SPH_ATTR_TOKENCOUNT:
dRows.PutNumAsString ( ( DWORD ) tMatch.GetAttr ( tLoc ) );
break;
case SPH_ATTR_BIGINT:
dRows.PutNumAsString( tMatch.GetAttr(tLoc) );
break;
case SPH_ATTR_UINT64:
dRows.PutNumAsString( (uint64_t)tMatch.GetAttr(tLoc) );
break;
case SPH_ATTR_FLOAT:
dRows.PutFloatAsString ( tMatch.GetAttrFloat(tLoc) );
break;
case SPH_ATTR_DOUBLE:
dRows.PutDoubleAsString ( tMatch.GetAttrDouble(tLoc) );
break;
case SPH_ATTR_INT64SET_PTR:
case SPH_ATTR_UINT32SET_PTR:
{
StringBuilder_c dStr;
sphPackedMVA2Str ( (const BYTE *)tMatch.GetAttr(tLoc), eAttrType==SPH_ATTR_INT64SET_PTR, dStr );
dRows.PutArray ( dStr, false );
}
break;
case SPH_ATTR_FLOAT_VECTOR_PTR:
{
StringBuilder_c dStr;
sphPackedFloatVec2Str ( (const BYTE *)tMatch.GetAttr(tLoc), dStr );
dRows.PutArray ( dStr, false );
}
break;
case SPH_ATTR_STRINGPTR:
{
auto * pString = ( const BYTE * ) tMatch.GetAttr ( tLoc );
auto dString = sphUnpackPtrAttr ( pString );
if ( dString.second>1 && dString.first[dString.second-2]=='\0' )
dString.second -= 2;
dRows.PutArray ( dString );
}
break;
case SPH_ATTR_JSON_PTR:
{
auto * pString = (const BYTE*) tMatch.GetAttr ( tLoc );
JsonEscapedBuilder sTmp;
if ( pString )
{
auto dJson = sphUnpackPtrAttr ( pString );
sphJsonFormat ( sTmp, dJson.first );
}
dRows.PutArray ( sTmp );
}
break;
case SPH_ATTR_FACTORS:
case SPH_ATTR_FACTORS_JSON:
{
auto dFactors = sphUnpackPtrAttr ((const BYTE *) tMatch.GetAttr ( tLoc ));
StringBuilder_c sTmp;
if ( !IsEmpty ( dFactors ))
sphFormatFactors ( sTmp, (const unsigned int *)dFactors.first, eAttrType==SPH_ATTR_FACTORS_JSON );
dRows.PutArray ( sTmp, false );
}
break;
case SPH_ATTR_JSON_FIELD_PTR:
{
const BYTE * pField = (const BYTE *)tMatch.GetAttr ( tLoc );
if ( !pField )
{
dRows.PutNULL();
break;
}
auto dField = sphUnpackPtrAttr ( pField );
auto eJson = ESphJsonType ( *dField.first++ );
if ( eJson==JSON_NULL )
{
dRows.PutNULL();
break;
}
// send string to client
JsonEscapedBuilder sTmp;
sphJsonFieldFormat ( sTmp, dField.first, eJson, false );
dRows.PutArray ( sTmp, false );
}
break;
default:
dRows.Add(1);
dRows.Add('-');
break;
}
}
}
// returns N of matches in resultset
uint64_t SendMysqlSelectResult ( RowBuffer_i & dRows, const AggrResult_t & tRes, bool bMoreResultsFollow, bool bAddQueryColumn, const CSphString * pQueryColumn, QueryProfile_c * pProfile )
{
CSphScopedProfile tProf ( pProfile, SPH_QSTATE_NET_WRITE );
if ( !tRes.m_iSuccesses )
{
if ( !tRes.m_sError.IsEmpty() )
{
// at this point, SELECT error logging should have been handled, so pass a NULL stmt to logger
dRows.Error ( tRes.m_sError.cstr() );
return 0;
}
assert ( tRes.m_sError.IsEmpty() );
auto iWarns = tRes.m_sWarning.IsEmpty() ? 0 : 1;
CSphString sMeta = BuildMetaOneline ( tRes );
dRows.HeadBegin();
dRows.HeadColumn ( "" );
dRows.HeadEnd();
dRows.Eof ( bMoreResultsFollow, iWarns, sMeta.cstr() );
return 0;
}
// empty result sets just might carry the full uberschema
	// bummer! let's protect ourselves against that
CSphBitvec tAttrsToSend;
bool bReturnZeroCount = !tRes.m_dZeroCount.IsEmpty();
assert ( bReturnZeroCount || tRes.m_tSchema.GetAttrsCount() );
sphGetAttrsToSend ( tRes.m_tSchema, false, true, tAttrsToSend );
dRows.HeadBegin ();
for ( int i=0; i<tRes.m_tSchema.GetAttrsCount(); ++i )
{
if ( !tAttrsToSend.BitGet(i) )
continue;
const CSphColumnInfo & tCol = tRes.m_tSchema.GetAttr(i);
dRows.HeadColumn ( tCol.m_sName.cstr(), ESphAttr2MysqlColumn ( tCol.m_eAttrType ) );
}
if ( bAddQueryColumn )
dRows.HeadColumn ( "query" );
// EOF packet is sent explicitly due to non-default params.
auto iWarns = tRes.m_sWarning.IsEmpty() ? 0 : 1;
dRows.HeadEnd ( bMoreResultsFollow, iWarns );
	// FIXME!!! replace those vector relocations with SqlRowBuffer
const CSphColumnInfo * pNullBitmaskAttr = tRes.m_tSchema.GetAttr ( GetNullMaskAttrName() );
assert ( tRes.m_bSingle );
auto dMatches = tRes.m_dResults.First ().m_dMatches.Slice ( tRes.m_iOffset, tRes.m_iCount );
uint64_t uMatches = tRes.m_dResults.First ().m_dMatches.GetLength();
for ( const auto & tMatch : dMatches )
{
SendMysqlMatch ( tMatch, tAttrsToSend, tRes.m_tSchema, dRows, pNullBitmaskAttr );
if ( bAddQueryColumn )
{
assert ( pQueryColumn );
dRows.PutString ( *pQueryColumn );
}
if ( !dRows.Commit() )
return uMatches;
}
if ( bReturnZeroCount )
ReturnZeroCount ( tRes.m_tSchema, tAttrsToSend, tRes.m_dZeroCount, dRows );
CSphString sMeta = BuildMetaOneline ( tRes );
// eof packet
dRows.Eof ( bMoreResultsFollow, iWarns, sMeta.cstr() );
return uMatches;
}
void HandleMysqlWarning ( const CSphQueryResultMeta & tLastMeta, RowBuffer_i & dRows, bool bMoreResultsFollow )
{
// can't send simple ok if there are more results to send
// as it breaks order of multi-result output
if ( tLastMeta.m_sWarning.IsEmpty() && !bMoreResultsFollow )
{
dRows.Ok();
return;
}
// result set header packet
dRows.HeadBegin ();
dRows.HeadColumn ( "Level" );
dRows.HeadColumn ( "Code", MYSQL_COL_DECIMAL );
dRows.HeadColumn ( "Message" );
dRows.HeadEnd ( bMoreResultsFollow );
// row
dRows.PutString ( "warning" );
dRows.PutString ( "1000" );
dRows.PutString ( tLastMeta.m_sWarning );
dRows.Commit();
// cleanup
dRows.Eof ( bMoreResultsFollow );
}
void HandleMysqlStatus ( RowBuffer_i & dRows, const SqlStmt_t & tStmt, bool bMoreResultsFollow )
{
VectorLike dStatus ( tStmt.m_sStringParam );
switch ( tStmt.m_eStmt )
{
case STMT_SHOW_STATUS:
BuildStatus ( dStatus );
break;
case STMT_SHOW_AGENT_STATUS:
BuildAgentStatus ( dStatus, tStmt.m_sIndex );
break;
default:
assert(0); // only 'show' statements allowed here.
break;
}
// result set header packet
if (!dRows.HeadOfStrings ( dStatus.Header() ))
return;
// send rows
for ( int iRow=0; iRow<dStatus.GetLength(); iRow+=2 )
if ( !dRows.DataTuplet ( dStatus[iRow+0].cstr (), dStatus[iRow+1].cstr () ) )
return;
// cleanup
dRows.Eof ( bMoreResultsFollow );
}
void HandleMysqlMeta ( RowBuffer_i & dRows, const SqlStmt_t & tStmt, const CSphQueryResultMeta & tLastMeta, bool bMoreResultsFollow )
{
VectorLike dMeta ( tStmt.m_sStringParam );
assert ( tStmt.m_eStmt==STMT_SHOW_META );
BuildMeta ( dMeta, tLastMeta );
// result set header packet
if ( !dRows.HeadOfStrings ( dMeta.Header () ) )
return;
// send rows
for ( int iRow=0; iRow<dMeta.GetLength(); iRow+=2 )
if ( !dRows.DataTuplet ( dMeta[iRow+0].cstr (), dMeta[iRow+1].cstr () ) )
return;
// cleanup
dRows.Eof ( bMoreResultsFollow );
}
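// translate a DELETE filter into a percolate delete command; exactly one
// filter is required, and only three shapes are supported: a values filter on
// id/@id/uid, a single 'tags' string, or a 'tags' string list (joined by commas)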
static std::unique_ptr<ReplicationCommand_t> MakePercolateDeleteDocumentsCommand ( CSphString sIndex, CSphString sCluster, const SqlStmt_t & tStmt, CSphString & sError )
{
	// take a reference to avoid copying the filters
const CSphQuery& tQuery = tStmt.m_tQuery;
if ( tQuery.m_dFilters.IsEmpty() || tQuery.m_dFilters.GetLength() > 1 )
{
sError.SetSprintf ( "only single filter supported, got %d", tQuery.m_dFilters.GetLength() );
return nullptr;
}
const CSphFilterSettings* pFilter = tQuery.m_dFilters.Begin();
auto pCmd = MakeReplicationCommand ( ReplCmd_e::PQUERY_DELETE, std::move ( sIndex ), std::move ( sCluster ) );
if ( ( pFilter->m_bHasEqualMin || pFilter->m_bHasEqualMax ) && !pFilter->m_bExclude && pFilter->m_eType==SPH_FILTER_VALUES && ( pFilter->m_sAttrName=="@id" || pFilter->m_sAttrName=="id" || pFilter->m_sAttrName=="uid" ) )
{
pCmd->m_dDeleteQueries.Append ( pFilter->GetValues() );
return pCmd;
}
if ( pFilter->m_eType==SPH_FILTER_STRING && pFilter->m_sAttrName=="tags" && !pFilter->m_dStrings.IsEmpty() )
{
pCmd->m_sDeleteTags = pFilter->m_dStrings[0];
return pCmd;
}
if ( pFilter->m_eType==SPH_FILTER_STRING_LIST && pFilter->m_sAttrName=="tags" && !pFilter->m_dStrings.IsEmpty() )
{
StringBuilder_c tBuf ( "," );
pFilter->m_dStrings.for_each ( [&tBuf] ( const auto& sVal ) { tBuf << sVal; } );
tBuf.FinishBlocks ();
tBuf.MoveTo ( pCmd->m_sDeleteTags );
return pCmd;
}
sError.SetSprintf ( "unsupported filter type %d, attribute '%s'", pFilter->m_eType, pFilter->m_sAttrName.cstr() );
return nullptr;
}
static int LocalIndexDoDeleteDocuments ( const CSphString & sName, const char * sDistributed, const SqlStmt_t & tStmt,
SearchFailuresLog_c & dErrors, bool bCommit, CSphSessionAccum & tAcc )
{
const CSphString & sCluster = tStmt.m_sCluster;
const CSphString & sStore = tStmt.m_tQuery.m_sStore;
bool bOnlyStoreDocIDs = !sStore.IsEmpty();
CSphString sError;
auto err = [&sName, &sDistributed, &sError, &dErrors] (const char* szErr = nullptr)
{
dErrors.Submit ( sName, sDistributed, szErr ? szErr : sError.cstr() );
return 0;
};
cServedIndexRefPtr_c pServed { GetServed ( sName ) };
if ( !ServedDesc_t::IsMutable ( pServed ) )
return err ( "table not available, or does not support DELETE" );
GlobalCrashQueryGetRef().m_dIndex = FromStr ( sName );
if ( !ValidateClusterStatement ( sName, *pServed, sCluster, IsHttpStmt ( tStmt ) ) )
return err ( TlsMsg::szError() );
// process store to local variable instead of deletion (here we don't need any stuff like accum, txn, replication)
if ( bOnlyStoreDocIDs )
{
if ( pServed->m_eType == IndexType_e::PERCOLATE )
return err ( "Storing del subset not implemented for PQ tables" );
assert ( sStore.Begins ( "@" ) );
DocsCollector_c dData { tStmt.m_tQuery, tStmt.m_bJson, sName, pServed, &sError };
auto dDocs = dData.GetValuesSlice();
if ( !sError.IsEmpty() )
return err();
SetLocalTemporaryUserVar ( sStore, dDocs );
return 0;
}
RtAccum_t* pAccum = nullptr;
	// go down the percolate path with the index unlocked
if ( pServed->m_eType==IndexType_e::PERCOLATE )
{
auto pCmd = MakePercolateDeleteDocumentsCommand ( sName, sCluster, tStmt, sError );
if ( !sError.IsEmpty() )
return err();
if ( !pCmd )
return 0;
RIdx_T<RtIndex_i*> pRtIndex { pServed };
pAccum = tAcc.GetAcc ( pRtIndex, sError );
if ( !sError.IsEmpty() )
return err();
assert ( pAccum );
pAccum->m_dCmd.Add ( std::move ( pCmd ) );
} else
{
DocsCollector_c dData { tStmt.m_tQuery, tStmt.m_bJson, sName, pServed, &sError};
auto dDocs = dData.GetValuesSlice();
if ( !sError.IsEmpty() )
return err();
RIdx_T<RtIndex_i*> pRtIndex { pServed };
pAccum = tAcc.GetAcc ( pRtIndex, sError );
if ( !sError.IsEmpty() )
return err();
if ( !pRtIndex->DeleteDocument ( dDocs, sError, pAccum ) ) // assume dData is alive, as we use slice from internal vec
return err();
assert ( pAccum );
pAccum->AddCommand ( ReplCmd_e::RT_TRX, sName, sCluster );
}
int iAffected = 0;
if ( bCommit )
{
if ( !HandleCmdReplicateDelete ( *pAccum, iAffected ) )
{
dErrors.Submit ( sName, sDistributed, TlsMsg::szError() );
return 0;
}
}
return iAffected;
}
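// entry point for SphinxQL DELETE: resolves local and distributed tables, runs
// local deletions, and forwards the statement to remote agents (autocommit=1 only)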
void sphHandleMysqlDelete ( StmtErrorReporter_i & tOut, const SqlStmt_t & tStmt, Str_t sQuery )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto* pSession = session::GetClientSession();
pSession->FreezeLastMeta();
bool bCommit = pSession->m_bAutoCommit && !pSession->m_bInTransaction;
auto& tAcc = pSession->m_tAcc;
StatCountCommand ( SEARCHD_COMMAND_DELETE );
MEMORY ( MEM_SQL_DELETE );
// shortcut
const CSphQuery & tQuery = tStmt.m_tQuery;
const CSphString & sStorevar = tQuery.m_sStore;
bool bStoreVar = !sStorevar.IsEmpty();
if ( bStoreVar && !sStorevar.Begins("@") )
{
tOut.Error ( "store var name must start with @, '%s' given", sStorevar.cstr() );
return;
}
StrVec_t dNames;
ParseIndexList ( tStmt.m_sIndex, dNames );
if ( dNames.IsEmpty() )
{
tOut.Error ( "no such table '%s'", tStmt.m_sIndex.cstr () );
return;
}
DistrPtrs_t dDistributed;
CSphString sMissed;
if ( !ExtractDistributedIndexes ( dNames, dDistributed, sMissed ) )
{
tOut.Error ( "unknown table '%s' in delete request", sMissed.cstr () );
return;
}
	// DELETE forwarded to agents works only with autocommit=1
if ( !bCommit )
{
for ( auto &pDist : dDistributed )
{
if ( !pDist || pDist->m_dAgents.IsEmpty() )
continue;
tOut.Error ( "table '%s': DELETE is not supported on agents when autocommit=0", tStmt.m_sIndex.cstr() );
return;
}
}
// do delete
SearchFailuresLog_c dErrors;
int iAffected = 0;
// delete for local indexes
ARRAY_FOREACH ( iIdx, dNames )
{
const CSphString & sName = dNames[iIdx];
bool bLocal = g_pLocalIndexes->Contains ( sName );
if ( bLocal )
{
iAffected += LocalIndexDoDeleteDocuments ( sName, nullptr, tStmt, dErrors, bCommit, tAcc );
}
else if ( dDistributed[iIdx] )
{
assert ( !dDistributed[iIdx]->IsEmpty() );
for ( const CSphString& sLocal : dDistributed[iIdx]->m_dLocal )
{
bool bDistLocal = g_pLocalIndexes->Contains ( sLocal );
if ( bDistLocal )
{
iAffected += LocalIndexDoDeleteDocuments ( sLocal, sName.cstr(), tStmt, dErrors, bCommit, tAcc );
}
}
}
// delete for remote agents
if ( !bStoreVar && dDistributed[iIdx] && !dDistributed[iIdx]->m_dAgents.IsEmpty() )
{
const DistributedIndex_t * pDist = dDistributed[iIdx];
VecRefPtrsAgentConn_t dAgents;
pDist->GetAllHosts ( dAgents );
int iGot = 0;
int iWarns = 0;
// connect to remote agents and query them
std::unique_ptr<RequestBuilder_i> pRequestBuilder = CreateRequestBuilder ( sQuery, tStmt );
std::unique_ptr<ReplyParser_i> pReplyParser = CreateReplyParser ( tStmt.m_bJson, iGot, iWarns );
PerformRemoteTasks ( dAgents, pRequestBuilder.get (), pReplyParser.get () );
// FIXME!!! report error & warnings from agents
// FIXME? profile update time too?
iAffected += iGot;
}
}
if ( !dErrors.IsEmpty() )
{
StringBuilder_c sReport;
dErrors.BuildReport ( sReport );
tOut.Error ( "%s", sReport.cstr () );
return;
}
tOut.Ok ( iAffected );
}
// fwd
void HandleMysqlShowProfile ( RowBuffer_i & tOut, const QueryProfile_c & p, bool bMoreResultsFollow );
static void HandleMysqlShowPlan ( RowBuffer_i & tOut, const QueryProfile_c & p, bool bMoreResultsFollow, bool bDot );
bool IsDot ( const SqlStmt_t & tStmt )
{
if ( tStmt.m_sThreadFormat=="dot" )
return true;
else if ( tStmt.m_sThreadFormat=="plain" )
return false;
return session::IsDot();
}
Profile_e ParseProfileFormat ( const SqlStmt_t & tStmt )
{
if ( tStmt.m_sSetValue=="dot" )
return Profile_e::DOT;
else if ( tStmt.m_sSetValue=="expr" )
return Profile_e::DOTEXPR;
else if ( tStmt.m_sSetValue=="exprurl" )
return Profile_e::DOTEXPRURL;
else if ( tStmt.m_iSetValue!=0 )
return Profile_e::PLAIN;
return Profile_e::NONE;
}
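// execute a multi-statement batch: run all SELECTs through a single
// SearchHandler_c pass, then stream each statement's result set in order,
// following the MySQL multi-resultset protocol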
void HandleMysqlMultiStmt ( const CSphVector<SqlStmt_t> & dStmt, CSphQueryResultMeta & tLastMeta, RowBuffer_i & dRows,
const CSphString & sWarning )
{
auto& tSess = session::Info();
// select count
int iSelect = dStmt.count_of ( [] ( const auto& tStmt ) { return tStmt.m_eStmt == STMT_SELECT; } );
CSphQueryResultMeta tPrevMeta = tLastMeta;
myinfo::SetCommand ( g_dSqlStmts[STMT_SELECT] );
AT_SCOPE_EXIT ( []() { myinfo::SetCommandDone(); } );
for ( int i=0; i<iSelect; i++ )
StatCountCommand ( SEARCHD_COMMAND_SEARCH );
auto tmStart = sphMicroTimer();
// setup query for searching
SearchHandler_c tHandler ( iSelect, sphCreatePlainQueryParser(), QUERY_SQL, true );
QueryProfile_c tProfile;
iSelect = 0;
for ( auto& tStmt: dStmt )
switch ( tStmt.m_eStmt )
{
case STMT_SELECT:
{
tHandler.SetQuery ( iSelect, tStmt.m_tQuery, std::move ( tStmt.m_pTableFunc ) );
++iSelect;
break;
}
case STMT_SET:
if ( tStmt.m_eSet == SET_LOCAL )
{
CSphString sSetName ( tStmt.m_sSetName );
sSetName.ToLower();
if ( sSetName == "profiling" )
tSess.SetProfile ( ParseProfileFormat ( tStmt ) );
}
default: break;
}
// use first meta for faceted search
bool bUseFirstMeta = ( tHandler.m_dQueries.GetLength()>1 && !tHandler.m_dQueries[0].m_bFacet && tHandler.m_dQueries[1].m_bFacet );
if ( tSess.IsProfile() )
tHandler.SetProfile ( &tProfile );
// do search
bool bSearchOK = true;
if ( iSelect )
{
bSearchOK = HandleMysqlSelect ( dRows, tHandler );
// save meta for SHOW *
if ( bUseFirstMeta )
{
tLastMeta = tHandler.m_dAggrResults.First();
// fix up overall query time
for ( auto& tResult : tHandler.m_dAggrResults )
{
tLastMeta.m_iQueryTime += tResult.m_iQueryTime;
tLastMeta.m_iCpuTime += tResult.m_iCpuTime;
tLastMeta.m_iAgentCpuTime += tResult.m_iAgentCpuTime;
}
} else
tLastMeta = tHandler.m_dAggrResults.Last();
}
if ( !bSearchOK )
return;
// send multi-result set
iSelect = 0;
ARRAY_FOREACH ( i, dStmt )
{
SqlStmt_e eStmt = dStmt[i].m_eStmt;
myinfo::SetCommand ( g_dSqlStmts[eStmt] );
AT_SCOPE_EXIT ( []() { myinfo::SetCommandDone(); } );
const CSphQueryResultMeta & tMeta = bUseFirstMeta ? tHandler.m_dAggrResults[0] : ( iSelect-1>=0 ? tHandler.m_dAggrResults[iSelect-1] : tPrevMeta );
bool bMoreResultsFollow = (i+1)<dStmt.GetLength();
bool bBreak = false;
switch ( eStmt )
{
case STMT_SELECT:
{
AggrResult_t & tRes = tHandler.m_dAggrResults[iSelect++];
// mysql server breaks send on error
bBreak = !tRes.m_iSuccesses;
if ( !sWarning.IsEmpty() )
tRes.m_sWarning = sWarning;
if ( bBreak )
bMoreResultsFollow = false;
auto uMatches = SendMysqlSelectResult ( dRows, tRes, bMoreResultsFollow, false, nullptr, ( tSess.IsProfile() ? &tProfile : nullptr ) );
StatCountCommandDetails ( SearchdStats_t::eSearch, uMatches, tmStart );
break;
}
case STMT_SHOW_WARNINGS:
HandleMysqlWarning ( tMeta, dRows, bMoreResultsFollow );
break;
case STMT_SHOW_STATUS:
case STMT_SHOW_AGENT_STATUS:
HandleMysqlStatus ( dRows, dStmt[i], bMoreResultsFollow ); // FIXME!!! add prediction counters
break;
case STMT_SHOW_META:
HandleMysqlMeta ( dRows, dStmt[i], tMeta, bMoreResultsFollow ); // FIXME!!! add prediction counters
break;
case STMT_SET: // TODO implement all set statements and make them handle bMoreResultsFollow flag
dRows.Ok ( 0, 0, NULL, bMoreResultsFollow );
break;
case STMT_SHOW_PROFILE:
HandleMysqlShowProfile ( dRows, tProfile, bMoreResultsFollow );
break;
case STMT_SHOW_PLAN:
HandleMysqlShowPlan ( dRows, tProfile, bMoreResultsFollow, ::IsDot ( dStmt[i] ) );
default:
break;
}
if ( bBreak )
break;
if ( sphInterrupted() )
{
sphLogDebug ( "HandleMultiStmt: got SIGTERM, sending the packet MYSQL_ERR_SERVER_SHUTDOWN" );
dRows.Error ( "Server shutdown in progress", EMYSQL_ERR::SERVER_SHUTDOWN );
return;
}
}
}
static bool HandleSetLocal ( CSphString& sError, const CSphString& sName, int64_t iSetValue, CSphString sSetValue, CSphSessionAccum& tAcc )
{
auto& tSess = session::Info();
if ( sName == "wait_timeout" || sName == "net_read_timeout" )
{
tSess.SetTimeoutS ( iSetValue );
return true;
}
if ( sName == "throttling_period" )
{
tSess.SetThrottlingPeriodMS ( iSetValue );
return true;
}
if ( sName == "net_write_timeout" )
{
tSess.SetWTimeoutS ( iSetValue );
return true;
}
if ( sName == "thread_stack" )
{
session::SetMaxStackSize ( Max ( iSetValue, 1024 * 1024 ) );
return true;
}
if ( sName == "optimize_by_id" )
{
session::SetOptimizeById ( !!iSetValue );
return true;
}
if ( sName == "max_threads_per_query" )
{
tSess.SetDistThreads ( iSetValue );
return true;
}
if ( sName == "ro" )
{
if ( !tSess.GetVip() )
{
if ( !sphCheckWeCanModify (sError) )
return true;
}
tSess.SetReadOnly ( !!iSetValue );
return true;
}
if ( sName == "threads_ex" )
{
auto dDispatchers = Dispatcher::ParseTemplates ( sSetValue.cstr() );
tSess.SetBaseDispatcherTemplate ( dDispatchers.first );
tSess.SetPseudoShardingDispatcherTemplate ( dDispatchers.second );
return true;
}
	// this check was moved here from the bison parser; only boolean values are allowed below.
if ( iSetValue != 0 && iSetValue != 1 )
{
		sError = SphSprintf ( "sphinxql: only 0 and 1 can be used as boolean values near '" INT64_FMT "'", iSetValue );
return true;
}
if ( sName == "autocommit" )
{
// per-session AUTOCOMMIT
bool bAutoCommit = ( iSetValue != 0 );
auto pSession = session::Info().GetClientSession();
pSession->m_bAutoCommit = bAutoCommit;
pSession->m_bInTransaction = false;
// commit all pending changes
if ( bAutoCommit && tAcc.GetIndex() && !HandleCmdReplicate ( *tAcc.GetAcc() ) )
{
TlsMsg::MoveError(sError);
return false;
}
return true;
}
if ( sName == "collation_connection" )
{
// per-session COLLATION_CONNECTION
CSphString& sVal = sSetValue;
sVal.ToLower();
tSess.SetCollation ( sphCollationFromName ( sVal, &sError ) );
return true;
}
if ( sName == "sql_quote_show_create" )
{
// per-session sql_quote_show_create
tSess.SetSqlQuoteShowCreate ( iSetValue!=0 );
return true;
}
if ( sName == "character_set_results"
|| sName == "sql_auto_is_null"
|| sName == "sql_safe_updates"
|| sName == "sql_mode"
|| sName == "time_zone" )
{
		// per-session vars we do not support at all; just ignore them for now
return true;
}
return false;
}
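// handle SET GLOBAL of a server variable; returns false for unknown names,
// while recognized ones return true and report failures via sError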
static bool HandleSetGlobal ( CSphString& sError, const CSphString& sName, int64_t iSetValue, CSphString sSetValue )
{
auto& tSess = session::Info();
if ( !tSess.GetVip() && !sphCheckWeCanModify ( sError ) )
return true;
// global server variable
if ( sName == "query_log_format" )
{
if ( sSetValue == "plain" )
g_eLogFormat = LOG_FORMAT_PLAIN;
else if ( sSetValue == "sphinxql" )
g_eLogFormat = LOG_FORMAT_SPHINXQL;
else
sError = "Unknown query_log_format value (must be plain or sphinxql)";
return true;
}
if ( sName == "log_level" )
{
if ( sSetValue == "info" )
g_eLogLevel = SPH_LOG_INFO;
else if ( sSetValue == "debug" )
g_eLogLevel = SPH_LOG_DEBUG;
else if ( sSetValue == "debugv" )
g_eLogLevel = SPH_LOG_VERBOSE_DEBUG;
else if ( sSetValue == "debugvv" )
g_eLogLevel = SPH_LOG_VERY_VERBOSE_DEBUG;
else if ( sSetValue == "replication" )
g_eLogLevel = SPH_LOG_RPL_DEBUG;
else if ( sSetValue.Begins ( "http" ) )
{
if ( !HttpSetLogVerbosity ( sSetValue ) )
sError = "Unknown log_level value (http_on, http_off, http_bad_req_on, http_bad_req_off)";
} else
sError = "Unknown log_level value (must be one of info, debug, debugv, debugvv, replication)";
return true;
}
if ( sName == "query_log_min_msec" )
{
g_iQueryLogMinMs = (int)iSetValue;
return true;
}
if ( sName == "qcache_max_bytes" )
{
const QcacheStatus_t& s = QcacheGetStatus();
QcacheSetup ( iSetValue, s.m_iThreshMs, s.m_iTtlS );
return true;
}
if ( sName == "qcache_thresh_msec" )
{
const QcacheStatus_t& s = QcacheGetStatus();
QcacheSetup ( s.m_iMaxBytes, (int)iSetValue, s.m_iTtlS );
return true;
}
if ( sName == "qcache_ttl_sec" )
{
const QcacheStatus_t& s = QcacheGetStatus();
QcacheSetup ( s.m_iMaxBytes, s.m_iThreshMs, (int)iSetValue );
return true;
}
if ( sName == "log_debug_filter" )
{
		int iLen = sSetValue.Length(); // length of the value being copied, not of the variable name
iLen = Min ( iLen, SPH_MAX_FILENAME_LEN );
memcpy ( g_sLogFilter, sSetValue.cstr(), iLen );
g_sLogFilter[iLen] = '\0';
g_iLogFilterLen = iLen;
return true;
}
if ( sName == "log_http_filter" )
{
SetLogHttpFilter ( sSetValue );
return true;
}
if ( sName == "es_compat" )
{
return SetLogManagement ( sSetValue, sError );
}
if ( sName == "net_wait" )
{
g_tmWaitUS = iSetValue * 1000LL;
return true;
}
if ( sName == "grouping_in_utc" )
{
if ( IsTimeZoneSet() )
{
sError = "grouping_in_utc=1 conflicts with 'timezone'";
return true;
}
SetGroupingInUTC ( !!iSetValue );
return true;
}
if ( sName == "timezone" )
{
if ( GetGroupingInUTC() )
{
sError = "grouping_in_utc=1 conflicts with 'timezone'";
return true;
}
SetTimeZone ( sSetValue.cstr(), sError );
return true;
}
if ( sName == "cpustats" )
{
g_bCpuStats = !!iSetValue;
return true;
}
if ( sName == "iostats" )
{
g_bIOStats = !!iSetValue;
return true;
}
if ( sName == "coredump" )
{
g_bCoreDump = !!iSetValue;
return true;
}
if ( sName == "maintenance" )
{
if ( tSess.GetVip() )
g_bMaintenance = !!iSetValue;
else
sError = "Only VIP connections can set maintenance mode";
return true;
}
if ( sName == "thread_stack" )
{
if ( tSess.GetVip() )
Threads::SetMaxCoroStackSize ( Max ( iSetValue, 1024 * 1024 ) );
else
sError = "Only VIP connections can change global thread_stack value";
return true;
}
if ( sName == "wait_timeout" )
{
if ( tSess.GetVip() )
g_iClientQlTimeoutS = iSetValue;
else
sError = "Only VIP connections can change global wait_timeout value";
return true;
}
if ( sName == "net_read_timeout" || sName == "read_timeout")
{
if ( tSess.GetVip() )
g_iReadTimeoutS = iSetValue;
else
sError = "Only VIP connections can change global net_read_timeout value";
return true;
}
if ( sName == "net_write_timeout" )
{
if ( tSess.GetVip() )
g_iWriteTimeoutS = iSetValue;
else
sError = "Only VIP connections can change global net_write_timeout value";
return true;
}
if ( sName == "network_timeout" )
{
if ( tSess.GetVip() )
{
g_iWriteTimeoutS = iSetValue;
g_iReadTimeoutS = iSetValue;
}
else
sError = "Only VIP connections can change global network_timeout value";
return true;
}
if ( sName == "reset_network_timeout_on_packet" )
{
if ( tSess.GetVip() )
{
g_bTimeoutEachPacket = !!iSetValue;
} else
sError = "Only VIP connections can change global reset_network_timeout_on_packet value";
return true;
}
if ( sName == "throttling_period" )
{
if ( tSess.GetVip() )
Threads::Coro::SetDefaultThrottlingPeriodMS ( iSetValue );
else
sError = "Only VIP connections can change global throttling_period value";
return true;
}
if ( sName == "max_threads_per_query" )
{
g_iDistThreads = iSetValue; // that is not dangerous to allow everybody change the value
return true;
}
if ( sName == "auto_optimize" )
{
if ( !AUTOOPTIMIZE_NEEDS_VIP || tSess.GetVip() )
g_iAutoOptimizeCutoffMultiplier = iSetValue;
else
sError = "Only VIP connections can change global auto_optimize value";
return true;
}
if ( sName == "optimize_cutoff" )
{
if ( iSetValue < 1 )
			sError = SphSprintf ( "optimize_cutoff should be greater than 0, got " INT64_FMT, iSetValue );
else
MutableIndexSettings_c::GetDefaults().m_iOptimizeCutoff = iSetValue;
return true;
}
if ( sName == "pseudo_sharding" )
{
SetPseudoSharding ( !!iSetValue );
return true;
}
if ( sName == "secondary_indexes" )
{
SetSecondaryIndexDefault ( iSetValue != 0 ? SIDefault_e::ENABLED : SIDefault_e::DISABLED );
return true;
}
if ( sName == "accurate_aggregation" )
{
SetAccurateAggregationDefault ( !!iSetValue );
return true;
}
if ( sName == "distinct_precision_threshold" )
{
SetDistinctThreshDefault ( iSetValue );
return true;
}
if ( sName == "threads_ex" )
{
if ( !THREAD_EX_NEEDS_VIP || tSess.GetVip() )
Dispatcher::SetGlobalDispatchers ( sSetValue.cstr() );
else
sError = "Only VIP connections can change global threads_ex value";
return true;
}
if ( sName=="expansion_merge_threshold_docs" )
{
if ( iSetValue<0 )
sError.SetSprintf ( "expansion_merge_threshold_docs should be positive value, got " INT64_FMT, iSetValue );
else
ExpandedMergeThdDocs ( iSetValue );
return true;
}
if ( sName=="expansion_merge_threshold_hits" )
{
if ( iSetValue<0 )
sError.SetSprintf ( "expansion_merge_threshold_hits should be positive value, got " INT64_FMT, iSetValue );
else
ExpandedMergeThdHits ( iSetValue );
return true;
}
if ( sName=="cluster_user" )
{
g_sClusterUser = std::move ( sSetValue );
return true;
}
return false;
}
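// dispatch all flavours of SET: session vars, global server vars, global user
// vars, per-table and per-cluster vars, and the relaxed multi-assignment form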
void HandleMysqlSet ( RowBuffer_i & tOut, SqlStmt_t & tStmt, CSphSessionAccum & tAcc )
{
auto& tSess = session::Info();
MEMORY ( MEM_SQL_SET );
CSphString sError;
tStmt.m_sSetName.ToLower();
switch ( tStmt.m_eSet )
{
case SET_LOCAL: // SET foo = value|'svalue'|null
if ( !HandleSetLocal ( sError, tStmt.m_sSetName, tStmt.m_iSetValue, tStmt.m_sSetValue, tAcc) )
{
if ( tStmt.m_sSetName == "profiling" )
{
// per-session PROFILING
tSess.SetProfile ( ParseProfileFormat ( tStmt ) );
} else
{
// unknown variable, return error
tOut.ErrorEx ( "Unknown session variable '%s' in SET statement", tStmt.m_sSetName.cstr() );
return;
}
}
if ( sError.IsEmpty() )
break;
else {
tOut.ErrorEx ( "%s", sError.cstr() );
return;
}
break;
case SET_GLOBAL_UVAR: // SET GLOBAL @foo = (i1,i2,...)'
{
// global user variable
// INT_SET type must be sorted
tStmt.m_dSetValues.Sort();
SetLocalUserVar ( tStmt.m_sSetName, tStmt.m_dSetValues );
break;
}
case SET_GLOBAL_SVAR: // SET GLOBAL foo = iValue|'string'
if ( !HandleSetGlobal ( sError, tStmt.m_sSetName, tStmt.m_iSetValue, tStmt.m_sSetValue ) )
{
tOut.ErrorEx ( "Unknown system variable '%s'", tStmt.m_sSetName.cstr() );
return;
}
if ( sError.IsEmpty() )
break;
else {
tOut.ErrorEx ( "%s", sError.cstr() );
return;
}
break;
case SET_INDEX_UVAR: // SET INDEX bar GLOBAL @foo = (values)
if ( !SendUserVar ( tStmt.m_sIndex.cstr(), tStmt.m_sSetName.cstr(), tStmt.m_dSetValues, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
break;
case SET_CLUSTER_UVAR: // SET CLUSTER ident GLOBAL 'variable' = string|int
{
if ( !ReplicateSetOption ( tStmt.m_sIndex, tStmt.m_sSetName, tStmt.m_sSetValue ) )
{
tOut.Error ( TlsMsg::szError() );
return;
}
}
break;
case SET_EXTRA: // relaxed SET SESSION foo=1, GLOBAL fee='bar', SESSION a='b', etc.
ARRAY_FOREACH (i, tStmt.m_dInsertSchema)
{
auto sName = tStmt.m_dInsertSchema[i];
sName.ToLower();
if ( tStmt.m_dInsertValues[i].m_iType & 1) // lowest bit 1 means 'session', 0 means 'global'
{
if ( !HandleSetLocal ( sError, sName, tStmt.m_dInsertValues[i].GetValueInt(), tStmt.m_dInsertValues[i].m_sVal, tAcc ) )
{
// unknown variable, return error
tOut.ErrorEx ( "Unknown session variable '%s' in SET statement", sName.cstr() );
return;
}
} else {
if ( !HandleSetGlobal ( sError, sName, tStmt.m_dInsertValues[i].GetValueInt(), tStmt.m_dInsertValues[i].m_sVal ) )
{
// unknown variable, return error
tOut.ErrorEx ( "Unknown system variable '%s'", sName.cstr() );
return;
}
}
}
if ( !sError.IsEmpty() )
{
tOut.ErrorEx ( "%s", sError.cstr() );
return;
}
break;
default:
tOut.ErrorEx ( "internal error: unhandled SET mode %d", (int) tStmt.m_eSet );
return;
}
// it went ok
tOut.Ok();
}
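// ATTACH: move the contents of a plain or RT table into a target RT table;
// not allowed for tables that are part of a cluster. On success the plain
// source table is removed from the served list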
void HandleMysqlAttach ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
const CSphString & sFrom = tStmt.m_sIndex;
const CSphString & sTo = tStmt.m_sStringParam;
bool bTruncate = ( tStmt.m_iIntParam==1 );
if ( sFrom==sTo )
{
tOut.ErrorEx ( "can not ATTACH table '%s' to itself", sFrom.cstr() );
return;
}
auto pServedFrom = GetServed ( sFrom );
auto pServedTo = GetServed ( sTo );
if ( !pServedFrom )
{
tOut.ErrorEx ( "no such table '%s'", sFrom.cstr() );
return;
} else if ( pServedFrom->m_eType!=IndexType_e::PLAIN && pServedFrom->m_eType!=IndexType_e::RT )
{
tOut.Error ( "1st argument to ATTACH must be a plain or a RT table" );
return;
} else if ( !pServedTo )
{
tOut.ErrorEx ( "no such table '%s'", sTo.cstr() );
return;
} else if ( pServedTo->m_eType!=IndexType_e::RT )
{
tOut.Error ( "2nd argument to ATTACH must be a RT table" );
return;
}
// cluster does not implement ATTACH for now
auto tClusterTo = IsPartOfCluster ( pServedTo );
auto tClusterFrom = IsPartOfCluster ( pServedFrom );
if ( tClusterTo || tClusterFrom )
{
if ( tClusterTo )
tOut.ErrorEx ( "table %s is part of cluster %s, can not issue ATTACH", sTo.cstr(), tClusterTo->cstr() );
else
tOut.ErrorEx ( "table %s is part of cluster %s, can not issue ATTACH", sFrom.cstr(), tClusterFrom->cstr() );
return;
}
bool bFatal = false;
bool bAttached = false;
CSphString sError;
WIdx_T<RtIndex_i *> pTo { pServedTo };
if ( pServedFrom->m_eType==IndexType_e::PLAIN )
{
WIdx_c pPlainFrom { pServedFrom };
bAttached = pTo->AttachDiskIndex ( pPlainFrom, bTruncate, bFatal, sError );
if ( bAttached || bFatal )
g_pLocalIndexes->Delete ( sFrom );
if ( bAttached )
			pServedFrom->ReleaseIdx(); // since the index no longer belongs to us
} else
{
WIdx_T<RtIndex_i*> pFrom { pServedFrom };
bAttached = pTo->AttachRtIndex ( pFrom, bTruncate, bFatal, sError );
if ( bFatal )
g_pLocalIndexes->Delete ( sFrom );
}
if ( bAttached )
tOut.Ok();
else
tOut.Error ( sError.cstr() );
}
void HandleMysqlFlushRtindex ( RowBuffer_i & tOut, const SqlStmt_t & tStmt )
{
CSphString sError;
auto pIndex = GetServed ( tStmt.m_sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "FLUSH RTINDEX requires an existing RT table" );
return;
}
RIdx_T<RtIndex_i*> ( pIndex )->ForceRamFlush ( "forced" );
tOut.Ok();
}
void HandleMysqlFlushRamchunk ( RowBuffer_i & tOut, const SqlStmt_t & tStmt )
{
auto pIndex = GetServed ( tStmt.m_sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "FLUSH RAMCHUNK requires an existing RT table" );
return;
}
RIdx_T<RtIndex_i*> pRt { pIndex };
if ( !pRt->ForceDiskChunk() )
{
CSphString sError;
sError.SetSprintf ( "table '%s': FLUSH RAMCHUNK failed; TABLE UNUSABLE (%s)", tStmt.m_sIndex.cstr(), pRt->GetLastError().cstr() );
tOut.Error ( sError.cstr () );
g_pLocalIndexes->Delete ( tStmt.m_sIndex );
return;
}
tOut.Ok();
}
void HandleMysqlFlush ( RowBuffer_i & tOut, const SqlStmt_t & )
{
int iTag = CommandFlush();
tOut.HeadBegin ();
tOut.HeadColumn ( "tag", MYSQL_COL_LONG );
tOut.HeadEnd ();
// data packet, var value
tOut.PutNumAsString ( iTag );
tOut.Commit();
// done
tOut.Eof();
}
int GetLogFD ()
{
if ( g_bLogStdout && g_iLogFile!=STDOUT_FILENO )
return STDOUT_FILENO;
return g_iLogFile;
}
const CSphString & sphGetLogFile () noexcept
{
return g_sLogFile;
}
// same for select ... from index.files
void HandleSelectFiles ( RowBuffer_i & tOut, const SqlStmt_t * pStmt )
{
tOut.HeadBegin ();
tOut.HeadColumn ( "file" );
tOut.HeadColumn ( "normalized" );
tOut.HeadColumn ( "size", MYSQL_COL_LONGLONG );
if ( !tOut.HeadEnd () )
return;
const auto & tStmt = *pStmt;
auto pServed = GetServed ( tStmt.m_sIndex );
if ( !ServedDesc_t::IsLocal ( pServed ) )
{
tOut.Error ( "FILES requires an existing local table" );
return;
}
StrVec_t dFiles;
StrVec_t dExt;
RIdx_c ( pServed )->GetIndexFiles ( dFiles, dExt );
auto sFormat = tStmt.m_sThreadFormat;
if ( sFormat!="external" )
ARRAY_CONSTFOREACH( i, dFiles )
{
tOut.PutString ( dFiles[i] );
tOut.PutString ( RealPath ( dFiles[i] ) );
tOut.PutNumAsString ( sphGetFileSize ( dFiles[i], nullptr ) );
if ( !tOut.Commit () )
return;
}
if ( sFormat=="all" || sFormat=="external" )
{
dExt.Uniq ();
ARRAY_CONSTFOREACH( i, dExt )
{
tOut.PutString ( dExt[i] );
tOut.PutString ( RealPath ( dExt[i] ) );
tOut.PutNumAsString ( sphGetFileSize ( dExt[i], nullptr ) );
if ( !tOut.Commit () )
return;
}
}
tOut.Eof();
}
// fwd
static bool PrepareReconfigure ( const char * szIndex, CSphReconfigureSettings & tSettings, StrVec_t * pWarnings, CSphString & sError );
void HandleMysqlTruncate ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
bool bReconfigure = ( tStmt.m_iIntParam==1 );
auto pCmd = MakeReplicationCommand ( ReplCmd_e::TRUNCATE, tStmt.m_sIndex, tStmt.m_sCluster );
CSphString sError;
StrVec_t dWarnings;
const CSphString & sIndex = tStmt.m_sIndex;
if ( bReconfigure )
{
pCmd->m_tReconfigure = std::make_unique<CSphReconfigureSettings>();
pCmd->m_tReconfigure->m_bChangeSchema = true;
}
if ( bReconfigure && !PrepareReconfigure ( sIndex.cstr(), *pCmd->m_tReconfigure, &dWarnings, sError ) )
{
tOut.Error ( sError.cstr () );
return;
}
// get an exclusive lock for operation
// but only read lock for check
{
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "TRUNCATE RTINDEX requires an existing RT table" );
return;
}
if ( !ValidateClusterStatement ( sIndex, *pIndex, tStmt.m_sCluster, IsHttpStmt ( tStmt ) ) )
{
tOut.Error ( TlsMsg::szError() );
return;
}
}
auto* pSession = session::GetClientSession();
auto& tAcc = pSession->m_tAcc;
auto* pAccum = tAcc.GetAcc();
pAccum->m_dCmd.Add ( std::move ( pCmd ) );
sWarning = ConcatWarnings ( dWarnings );
bool bRes = HandleCmdReplicate ( *pAccum );
if ( !bRes )
tOut.Error ( TlsMsg::szError() );
else
tOut.Ok ( 0, dWarnings.GetLength() );
}
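// OPTIMIZE TABLE: start merging disk chunks of an RT table down to the
// requested cutoff; with the SYNC option, wait until the optimize pass finishes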
void HandleMysqlOptimize ( RowBuffer_i & tOut, const SqlStmt_t & tStmt )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto sIndex = tStmt.m_sIndex;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "OPTIMIZE TABLE requires an existing RT table" );
return;
}
OptimizeTask_t tTask;
tTask.m_eVerb = OptimizeTask_t::eManualOptimize;
tTask.m_iCutoff = tStmt.m_tQuery.m_iCutoff<=0 ? 0 : tStmt.m_tQuery.m_iCutoff;
auto bOptimizeStarted = RIdx_T<RtIndex_i *> ( pIndex )->StartOptimize ( std::move ( tTask ) );
if ( tStmt.m_tQuery.m_bSync && !bOptimizeStarted )
{
tOut.Error ( "Can't optimize frozen table" );
return;
}
if ( tStmt.m_tQuery.m_bSync && !PollOptimizeRunning ( sIndex ) )
tOut.Error ( "RT table went away during waiting" );
else
tOut.Ok ();
}
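// ISphExtra implementation that serves LAST_INSERT_ID() to expressions by
// rendering the session's last inserted ids as a comma-separated string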
class ExtraLastInsertID_c final: public ISphExtra
{
bool ExtraDataImpl ( ExtraData_e eCmd, void** pData ) final
{
if ( eCmd != EXTRA_GET_LAST_INSERT_ID )
return false;
auto* sVal = (CSphString*)pData;
assert ( sVal );
StringBuilder_c tBuf ( "," );
session::Info().GetClientSession()->m_dLastIds.for_each ( [&tBuf] ( auto& iId ) { tBuf << iId; } );
tBuf.MoveTo ( *sVal );
return true;
}
};
// STMT_SELECT_COLUMNS: SELECT @@sysvar1 [ as alias] [@@sysvarN [ as alias]] [limit M]
// SELECT expr, @@sysvar1, expr2, ... [limit M]
void HandleMysqlSelectColumns ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, ClientSession_c* pSession )
{
struct SysVar_t
{
const MysqlColumnType_e m_eType;
const char* m_szName;
std::function<CSphString ( void )> m_fnValue;
};
const bool bHasBuddy = HasBuddy();
const SysVar_t tDefaultStr { MYSQL_COL_STRING, nullptr, [] { return "<empty>"; } };
const SysVar_t tDefaultNum { MYSQL_COL_LONG, nullptr, [] { return "0"; } };
const SysVar_t dSysvars[] =
{ bHasBuddy ? tDefaultNum : tDefaultStr, // stub
{ MYSQL_COL_LONG, "@@session.auto_increment_increment", [] {return "1";}},
{ MYSQL_COL_STRING, "@@character_set_client", [] {return "utf8";}},
{ MYSQL_COL_STRING, "@@character_set_connection", [] {return "utf8";}},
{ MYSQL_COL_LONG, "@@max_allowed_packet", [] { StringBuilder_c s; s << g_iMaxPacketSize; return CSphString(s); }},
{ MYSQL_COL_STRING, "@@version_comment", [] { return szGIT_BRANCH_ID;}},
{ MYSQL_COL_LONG, "@@lower_case_table_names", [] { return "1"; }},
{ MYSQL_COL_STRING, "@@session.last_insert_id", [pSession] {
StringBuilder_c s ( "," );
pSession->m_dLastIds.Apply ( [&s] ( int64_t iID ) { s << iID; } );
return CSphString ( s );
}},
{ MYSQL_COL_LONG, "@@autocommit", [pSession] { return pSession->m_bAutoCommit ? "1" : "0"; } },
};
auto VarIdxByName = [&dSysvars] ( const CSphString& sName ) noexcept -> int
{
constexpr auto iSysvars = sizeof ( dSysvars ) / sizeof ( dSysvars[0] );
for ( int i = 1; i<(int)iSysvars; ++i )
if ( sName == dSysvars[i].m_szName )
return i;
return 0;
};
const auto& dItems = tStmt.m_tQuery.m_dItems;
struct PreparedItem_t {
ESphAttr m_eType;
MysqlColumnType_e m_eTypeMysql;
ISphExprRefPtr_c m_pExpr;
int m_iSysvarIdx;
const char* m_szAlias;
CSphString m_sError;
};
CSphVector<PreparedItem_t> dColumns;
bool bHaveValidExpressions = false; // whether we have at least one expression among @@sysvars
bool bHaveInvalidExpressions = false; // whether at least one expression is erroneous
for ( const auto& dItem : dItems )
{
bool bIsExpr = !dItem.m_sExpr.Begins ( "@@" );
CSphString sError;
auto iVar = VarIdxByName ( dItem.m_sExpr );
if ( !iVar )
{
CSphString sVar = dItem.m_sExpr;
CSphSchema tSchema;
ESphAttr eAttrType;
ExprParseArgs_t tExprArgs;
tExprArgs.m_pAttrType = &eAttrType;
ISphExprRefPtr_c pExpr { sphExprParse ( sVar.cstr(), tSchema, nullptr, sError, tExprArgs ) };
if ( pExpr )
{
dColumns.Add ( { eAttrType, ESphAttr2MysqlColumn ( eAttrType ), pExpr, -1, dItem.m_sAlias.cstr() } );
bHaveValidExpressions = true;
continue;
}
bHaveInvalidExpressions |= bIsExpr;
}
dColumns.Add ( { SPH_ATTR_NONE, dSysvars[iVar].m_eType, nullptr, iVar, dItem.m_sAlias.cstr(), sError } );
}
assert ( dColumns.GetLength() == dItems.GetLength() );
// fail when we have error(s) in expression(s).
if ( bHaveInvalidExpressions )
{
StringBuilder_c sError ("; ");
dColumns.for_each( [&sError] (const PreparedItem_t& dCol) { if ( !dCol.m_sError.IsEmpty()) sError << dCol.m_sError; });
tOut.Error ( sError.cstr() );
return;
}
// fill header
tOut.HeadBegin ();
for ( const auto& dColumn : dColumns )
tOut.HeadColumn ( dColumn.m_szAlias, dColumn.m_eTypeMysql );
if ( !tOut.HeadEnd() )
return;
if ( bHaveValidExpressions )
{
ExtraLastInsertID_c tIds;
for ( auto& pExpr : dColumns )
if ( pExpr.m_pExpr )
pExpr.m_pExpr->Command ( SPH_EXPR_SET_EXTRA_DATA, &tIds );
}
std::optional<ExtraLastInsertID_c> tIds;
// fill values
for ( auto& dColumn : dColumns )
{
if ( dColumn.m_pExpr ) // expression
{
if ( !tIds.has_value() )
tIds.emplace();
auto& pExpr = dColumn.m_pExpr;
pExpr->Command ( SPH_EXPR_SET_EXTRA_DATA, &tIds.value() );
CSphMatch tMatch;
switch ( dColumn.m_eType )
{
case SPH_ATTR_STRINGPTR:
{
const BYTE* pStr = nullptr;
int iLen = pExpr->StringEval ( tMatch, &pStr );
tOut.PutArray ( { pStr, iLen } );
FreeDataPtr ( *pExpr, pStr );
break;
}
case SPH_ATTR_INTEGER: tOut.PutNumAsString ( pExpr->IntEval ( tMatch ) ); break;
case SPH_ATTR_BIGINT: tOut.PutNumAsString ( pExpr->Int64Eval ( tMatch ) ); break;
case SPH_ATTR_UINT64: tOut.PutNumAsString ( (uint64_t)pExpr->Int64Eval ( tMatch ) ); break;
case SPH_ATTR_FLOAT: tOut.PutFloatAsString ( pExpr->Eval ( tMatch ) ); break;
case SPH_ATTR_DOUBLE: tOut.PutDoubleAsString ( pExpr->Eval ( tMatch ) ); break;
default:
tOut.PutNULL();
break;
}
}
else
tOut.PutString ( dSysvars[dColumn.m_iSysvarIdx].m_fnValue() );
}
// finalize
tOut.Commit ();
tOut.Eof ();
}
void HandleMysqlShowCollations ( RowBuffer_i & tOut )
{
// MySQL Connector/J really expects an answer here
// field packets
tOut.HeadBegin ();
tOut.HeadColumn ( "Collation" );
tOut.HeadColumn ( "Charset" );
tOut.HeadColumn ( "Id", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "Default" );
tOut.HeadColumn ( "Compiled" );
tOut.HeadColumn ( "Sortlen" );
tOut.HeadEnd();
// data packets
tOut.PutString ( "utf8_general_ci" );
tOut.PutString ( "utf8" );
tOut.PutString ( "33" );
tOut.PutString ( "Yes" );
tOut.PutString ( "Yes" );
tOut.PutString ( "1" );
tOut.Commit();
// done
tOut.Eof();
}
void HandleMysqlShowCharacterSet ( RowBuffer_i & tOut )
{
// MySQL Connector/J really expects an answer here
// field packets
tOut.HeadBegin ();
tOut.HeadColumn ( "Charset" );
tOut.HeadColumn ( "Description" );
tOut.HeadColumn ( "Default collation" );
tOut.HeadColumn ( "Maxlen" );
tOut.HeadEnd();
// data packets
tOut.PutString ( "utf8" );
tOut.PutString ( "UTF-8 Unicode" );
tOut.PutString ( "utf8_general_ci" );
tOut.PutString ( "3" );
tOut.Commit();
// done
tOut.Eof();
}
const char * sphCollationToName ( ESphCollation eColl )
{
switch ( eColl )
{
case SPH_COLLATION_LIBC_CI: return "libc_ci";
case SPH_COLLATION_LIBC_CS: return "libc_cs";
case SPH_COLLATION_UTF8_GENERAL_CI: return "utf8_general_ci";
case SPH_COLLATION_BINARY: return "binary";
default: return "unknown";
}
}
static const char * LogLevelName ( ESphLogLevel eLevel )
{
switch ( eLevel )
{
case SPH_LOG_FATAL: return "fatal";
case SPH_LOG_WARNING: return "warning";
case SPH_LOG_INFO: return "info";
case SPH_LOG_DEBUG: return "debug";
case SPH_LOG_RPL_DEBUG: return "replication";
case SPH_LOG_VERBOSE_DEBUG: return "debugv";
case SPH_LOG_VERY_VERBOSE_DEBUG: return "debugvv";
default: return "unknown";
}
}
// SHOW [GLOBAL|SESSION] VARIABLES WHERE variable_name='xxx' [OR variable_name='xxx']
void HandleMysqlShowVariables ( RowBuffer_i & dRows, const SqlStmt_t & tStmt )
{
VectorLike dTable ( tStmt.m_sStringParam );
{
auto pVars = session::Info().GetClientSession();
dTable.MatchTuplet ( "autocommit", pVars->m_bAutoCommit ? "1" : "0" );
dTable.MatchTupletf ( "auto_optimize", "%d", g_iAutoOptimizeCutoffMultiplier );
dTable.MatchTupletf ( "optimize_cutoff", "%d", MutableIndexSettings_c::GetDefaults().m_iOptimizeCutoff );
dTable.MatchTuplet ( "collation_connection", sphCollationToName ( session::GetCollation() ) );
dTable.MatchTuplet ( "query_log_format", g_eLogFormat==LOG_FORMAT_PLAIN ? "plain" : "sphinxql" );
dTable.MatchTuplet ( "session_read_only", session::GetReadOnly() ? "1" : "0" );
dTable.MatchTuplet ( "log_level", LogLevelName ( g_eLogLevel ) );
dTable.MatchTupletf ( "max_allowed_packet", "%d", g_iMaxPacketSize );
dTable.MatchTuplet ( "character_set_client", "utf8" );
dTable.MatchTuplet ( "character_set_connection", "utf8" );
dTable.MatchTuplet ( "grouping_in_utc", GetGroupingInUTC() ? "1" : "0" );
dTable.MatchTuplet ( "timezone", GetTimeZoneName().cstr() );
dTable.MatchTupletFn ( "last_insert_id" , [&pVars]
{
StringBuilder_c tBuf ( "," );
pVars->m_dLastIds.Apply ( [&tBuf] ( int64_t iID ) { tBuf << iID; } );
return tBuf;
});
}
dTable.MatchTuplet ( "pseudo_sharding", GetPseudoSharding() ? "1" : "0" );
switch ( GetSecondaryIndexDefault() )
{
case SIDefault_e::FORCE:
dTable.MatchTuplet ( "secondary_indexes", "force" ); break;
case SIDefault_e::ENABLED:
dTable.MatchTuplet ( "secondary_indexes", "1" ); break;
default:
dTable.MatchTuplet ( "secondary_indexes", "0" );
}
dTable.MatchTuplet ( "accurate_aggregation", GetAccurateAggregationDefault() ? "1" : "0" );
dTable.MatchTupletf ( "distinct_precision_threshold", "%d", GetDistinctThreshDefault() );
dTable.MatchTupletFn ( "threads_ex_effective", [] {
StringBuilder_c tBuf;
auto x = GetEffectiveBaseDispatcherTemplate();
auto y = GetEffectivePseudoShardingDispatcherTemplate();
Dispatcher::RenderTemplates ( tBuf, { x, y } );
return tBuf;
} );
dTable.MatchTuplet ( "cluster_user", g_sClusterUser.scstr() );
if ( tStmt.m_iIntParam>=0 ) // that is SHOW GLOBAL VARIABLES
{
dTable.MatchTupletf ( "thread_stack", "%d", Threads::GetMaxCoroStackSize() );
dTable.MatchTupletFn ( "threads_ex", [] {
StringBuilder_c tBuf;
auto x = Dispatcher::GetGlobalBaseDispatcherTemplate();
auto y = Dispatcher::GetGlobalPseudoShardingDispatcherTemplate();
Dispatcher::RenderTemplates ( tBuf, { x, y } );
return tBuf;
} );
Uservar_e eType = tStmt.m_iIntParam==0 ? USERVAR_INT_SET : USERVAR_INT_SET_TMP;
IterateUservars ( [&dTable, eType] ( const NamedRefVectorPair_t &dVar ) {
if ( dVar.second.m_eType==eType )
dTable.MatchTupletf ( dVar.first.cstr(), "%d", dVar.second.m_pVal ? dVar.second.m_pVal->GetLength() : 0 );
});
} else { // that is local (session) variables
dTable.MatchTupletf ( "thread_stack", "%d", session::GetMaxStackSize() );
dTable.MatchTupletFn ( "threads_ex", [] {
StringBuilder_c tBuf;
auto x = ClientTaskInfo_t::Info().GetBaseDispatcherTemplate();
auto y = ClientTaskInfo_t::Info().GetPseudoShardingDispatcherTemplate();
Dispatcher::RenderTemplates ( tBuf, { x, y } );
return tBuf;
});
dTable.MatchTuplet ( "user", session::GetClientSession()->m_sUser.scstr () );
}
// fine
dRows.DataTable ( dTable );
}
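// append per-interval (1min/5min/15min/total) query statistics; FormatFn
// renders each stat (avg/min/max/pct95/pct99) into the JSON-like status line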
template <typename FORMATFN>
static void AddQueryStats ( VectorLike & dStatus, const char * szPrefix, const QueryStats_t & tStats,
//void (*FormatFn)( StringBuilder_c & sBuf, uint64_t uQueries, uint64_t uStat, const char * sType ) )
FORMATFN FormatFn )
{
using namespace QueryStats;
static const char * dStatIntervalNames[INTERVAL_TOTAL] =
{
"1min",
"5min",
"15min",
"total"
};
static const char * dStatTypeNames[TYPE_TOTAL] =
{
"avg",
"min",
"max",
"pct95",
"pct99"
};
StringBuilder_c sBuf;
for ( int i = 0; i < INTERVAL_TOTAL; ++i )
{
if ( dStatus.MatchAddf ( "%s_%s", szPrefix, dStatIntervalNames[i] ) )
{
sBuf.Clear();
{
ScopedComma_c VARIABLE_IS_NOT_USED tRootBlock( sBuf, ", ", R"({"queries":)", "}" );
sBuf << tStats.m_dStats[i].m_uTotalQueries;
for ( int j = 0; j < TYPE_TOTAL; ++j )
FormatFn ( sBuf, tStats.m_dStats[i].m_uTotalQueries,
tStats.m_dStats[i].m_dData[j], dStatTypeNames[j] );
}
dStatus.Add ( sBuf.cstr() );
}
}
}
static void AddQueryTimeStatsToOutput ( VectorLike & dStatus, const char * szPrefix, const QueryStats_t & tQueryTimeStats )
{
AddQueryStats ( dStatus, szPrefix, tQueryTimeStats,
[]( StringBuilder_c & sBuf, uint64_t uQueries, uint64_t uStat, const char * sType )
{
uQueries ? sBuf.Sprintf( R"("%s_sec":%.3F)", sType, uStat ) : sBuf.AppendName( sType ) << R"("-")";
} );
}
static void AddFoundRowsStatsToOutput ( VectorLike & dStatus, const char * szPrefix, const QueryStats_t & tRowsFoundStats )
{
AddQueryStats ( dStatus, szPrefix, tRowsFoundStats,
[]( StringBuilder_c & sBuf, uint64_t uQueries, uint64_t uStat, const char * sType )
{
sBuf.AppendName( sType );
uQueries ? sBuf << uStat : sBuf << R"("-")";
} );
}
static void AddIndexQueryStats ( VectorLike & dStatus, const ServedStats_c& tStats )
{
QueryStats_t tQueryTimeStats, tRowsFoundStats;
tStats.CalculateQueryStats ( tRowsFoundStats, tQueryTimeStats );
AddQueryTimeStatsToOutput ( dStatus, "query_time", tQueryTimeStats );
#ifndef NDEBUG
QueryStats_t tExactQueryTimeStats, tExactRowsFoundStats;
tStats.CalculateQueryStatsExact ( tExactRowsFoundStats, tExactQueryTimeStats );
AddQueryTimeStatsToOutput ( dStatus, "exact_query_time", tExactQueryTimeStats );
#endif
AddFoundRowsStatsToOutput ( dStatus, "found_rows", tRowsFoundStats );
}
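// common SHOW TABLE STATUS rows for a disk-backed table: document/byte counters, per-field
// token counters (when the table tracks field lengths), RAM/disk/mmap usage and
// killed-documents stats; bRt and bPq additionally enable the RT- and percolate-specific rows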
static void AddDiskIndexStatus ( VectorLike & dStatus, const CSphIndex * pIndex, bool bRt, bool bPq )
{
auto iDocs = pIndex->GetStats ().m_iTotalDocuments;
if ( bPq )
{
dStatus.MatchTupletf ( "stored_queries", "%l", iDocs );
} else {
dStatus.MatchTupletf ( "indexed_documents", "%l", iDocs );
dStatus.MatchTupletf ( "indexed_bytes", "%l", pIndex->GetStats ().m_iTotalBytes );
}
const int64_t * pFieldLens = pIndex->GetFieldLens();
if ( pFieldLens )
{
int64_t iTotalTokens = 0;
for ( int i=0; i < pIndex->GetMatchSchema().GetFieldsCount(); ++i )
{
if ( dStatus.MatchAddf ( "field_tokens_%s", pIndex->GetMatchSchema ().GetFieldName ( i ) ) )
dStatus.Addf( "%l", pFieldLens[i] );
iTotalTokens += pFieldLens[i];
}
dStatus.MatchTupletf ( "total_tokens", "%l", iTotalTokens );
}
CSphIndexStatus tStatus;
pIndex->GetStatus ( &tStatus );
dStatus.MatchTupletf ( "ram_bytes", "%l", tStatus.m_iRamUse );
dStatus.MatchTupletf ( "disk_bytes", "%l", tStatus.m_iDiskUse );
if ( !bPq )
{
dStatus.MatchTupletf ( "disk_mapped", "%l", tStatus.m_iMapped );
dStatus.MatchTupletf ( "disk_mapped_cached", "%l", tStatus.m_iMappedResident );
dStatus.MatchTupletf ( "disk_mapped_doclists", "%l", tStatus.m_iMappedDocs );
dStatus.MatchTupletf ( "disk_mapped_cached_doclists", "%l", tStatus.m_iMappedResidentDocs );
dStatus.MatchTupletf ( "disk_mapped_hitlists", "%l", tStatus.m_iMappedHits );
dStatus.MatchTupletf ( "disk_mapped_cached_hitlists", "%l", tStatus.m_iMappedResidentHits );
dStatus.MatchTupletf ( "killed_documents", "%l", tStatus.m_iDead );
dStatus.MatchTupletFn ( "killed_rate", [&tStatus, iDocs] {
StringBuilder_c sPercent;
auto iTotalDocs = iDocs + tStatus.m_iDead;
if ( iTotalDocs )
sPercent.Sprintf ( "%0.2F%%", tStatus.m_iDead * 10000 / iTotalDocs );
else
sPercent << "0.00%";
return CSphString ( sPercent.cstr () );
} );
}
if ( bRt )
{
dStatus.MatchTupletf ( "ram_chunk", "%l", tStatus.m_iRamChunkSize );
dStatus.MatchTupletf ( "ram_chunk_segments_count", "%d", tStatus.m_iNumRamChunks );
dStatus.MatchTupletf ( "disk_chunks", "%d", tStatus.m_iNumChunks );
dStatus.MatchTupletf ( "mem_limit", "%l", tStatus.m_iMemLimit );
dStatus.MatchTupletf ( "mem_limit_rate", "%0.2F%%", PercentOf ( tStatus.m_fSaveRateLimit, 1.0, 2 ) );
dStatus.MatchTupletf ( "ram_bytes_retired", "%l", tStatus.m_iRamRetired );
dStatus.MatchTupletf ( "optimizing", "%l", tStatus.m_iOptimizesCount );
dStatus.MatchTupletf ( "locked", "%d", tStatus.m_iLockCount );
}
if ( bPq )
{
dStatus.MatchTupletf ( "max_stack_need", "%l", tStatus.m_iStackNeed );
dStatus.MatchTupletf ( "average_stack_base", "%l", tStatus.m_iStackBase );
dStatus.MatchTupletf ( "desired_thread_stack", "%l", sphRoundUp ( tStatus.m_iStackNeed + tStatus.m_iStackBase, 128 ) );
dStatus.MatchTupletf ( "locked", "%d", tStatus.m_iLockCount );
}
if ( bRt || bPq )
{
dStatus.MatchTupletf ( "tid", "%l", tStatus.m_iTID );
dStatus.MatchTupletf ( "tid_saved", "%l", tStatus.m_iSavedTID );
}
}
const char * szIndexType ( IndexType_e eType )
{
switch ( eType )
{
case IndexType_e::PLAIN: return "local";
case IndexType_e::TEMPLATE: return "template";
case IndexType_e::RT: return "rt";
case IndexType_e::PERCOLATE: return "percolate";
case IndexType_e::DISTR: return "distributed";
default: return "unknown";
}
}
static void AddPlainIndexStatus ( RowBuffer_i & tOut, const cServedIndexRefPtr_c& pServed, const ServedStats_c& tStats, const CSphString & sName, const CSphString & sPattern )
{
assert ( pServed );
RIdx_c pIndex { pServed };
assert ( pIndex );
VectorLike dStatus ( sPattern );
dStatus.MatchTuplet ( "table_type", szIndexType ( pServed->m_eType ) );
if ( pServed->m_eType != IndexType_e::TEMPLATE )
{
AddDiskIndexStatus ( dStatus, pIndex, pServed->m_eType == IndexType_e::RT, pServed->m_eType == IndexType_e::PERCOLATE );
AddIndexQueryStats ( dStatus, tStats );
}
tOut.DataTable ( dStatus );
}
static void AddDistributedIndexStatus ( RowBuffer_i & tOut, const cDistributedIndexRefPtr_t& pIndex, const CSphString & sName, const CSphString & sPattern )
{
assert ( pIndex );
VectorLike dStatus ( sPattern );
dStatus.MatchTuplet( "table_type", "distributed" );
AddIndexQueryStats ( dStatus, pIndex->m_tStats );
tOut.DataTable ( dStatus );
}
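// SHOW TABLE <name> STATUS [LIKE 'pattern']; for an RT table a single disk chunk
// may be addressed as well (the chunk id comes via the statement's int param/subkeys)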
void HandleMysqlShowIndexStatus ( RowBuffer_i& tOut, const SqlStmt_t& tStmt )
{
CSphString sError;
auto pServed = GetServed ( tStmt.m_sIndex );
int iChunk = tStmt.m_iIntParam;
if ( tStmt.m_dIntSubkeys.GetLength() >= 1 )
iChunk = tStmt.m_dIntSubkeys[0];
if ( pServed )
{
if ( iChunk >= 0 && pServed->m_eType == IndexType_e::RT )
{
RIdx_T<const RtIndex_i*> ( pServed )->ProcessDiskChunk ( iChunk, [&tOut, &tStmt] ( const CSphIndex* pIndex ) {
if ( !pIndex )
{
tOut.Error ( "SHOW TABLE STATUS requires an existing table" );
return;
}
VectorLike dStatus ( tStmt.m_sStringParam );
AddDiskIndexStatus ( dStatus, pIndex, false, false );
tOut.DataTable ( dStatus );
} );
} else
AddPlainIndexStatus ( tOut, pServed, *pServed->m_pStats, tStmt.m_sIndex, tStmt.m_sStringParam );
return;
}
auto pIndex = GetDistr ( tStmt.m_sIndex );
if ( pIndex )
AddDistributedIndexStatus ( tOut, pIndex, tStmt.m_sIndex, tStmt.m_sStringParam );
else
tOut.Error ( "SHOW TABLE STATUS requires an existing table" );
}
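// MySQL-compatible SHOW TABLE STATUS for FEDERATED clients: the 18-column header mimics
// MySQL's layout, and most of the values below are hard-coded stubs - only Name and Rows
// carry real data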
static bool AddFederatedIndexStatusHeader ( RowBuffer_i& tOut )
{
return tOut.HeadOfStrings ( { "Name", "Engine", "Version", "Row_format", "Rows", "Avg_row_length", "Data_length", "Max_data_length", "Index_length", "Data_free", "Auto_increment", "Create_time", "Update_time", "Check_time", "Collation", "Checksum", "Create_options", "Comment" } );
}
static void AddFederatedIndexStatusLine ( const CSphSourceStats& tStats, const CSphString& sName, RowBuffer_i& tOut )
{
tOut.PutString ( sName ); // Name
tOut.PutString ( "InnoDB" ); // Engine
tOut.PutString ( "10" ); // Version
tOut.PutString ( "Dynamic" ); // Row_format
tOut.PutNumAsString ( tStats.m_iTotalDocuments ); // Rows
tOut.PutString ( "4096" ); // Avg_row_length
tOut.PutString ( "0" ); // Data_length
tOut.PutString ( "0" ); // Max_data_length
tOut.PutString ( "0" ); // Index_length
tOut.PutString ( "0" ); // Data_free
tOut.PutString ( "5" ); // Auto_increment
tOut.PutNULL(); // Create_time
tOut.PutNULL(); // Update_time
tOut.PutNULL(); // Check_time
tOut.PutString ( "utf8" ); // Collation
tOut.PutNULL(); // Checksum
tOut.PutString ( "" ); // Create_options
tOut.PutString ( "" ); // Comment
tOut.Commit();
}
void HandleMysqlShowFederatedIndexStatus ( RowBuffer_i & tOut, const SqlStmt_t & tStmt )
{
CSphString sError;
if ( !AddFederatedIndexStatusHeader ( tOut ) )
return;
CheckLike tSelector { tStmt.m_sStringParam.cstr() };
auto dIndexes = GetAllServedIndexes();
bool bWithClusters = ClusterFlavour();
// fake stat for distrs
CSphSourceStats tFakeStats;
tFakeStats.m_iTotalDocuments = 1000; // TODO: check whether it's worth querying that number from the agents
for ( const NamedIndexType_t& tIndex : dIndexes )
{
CSphString sFullName;
if ( bWithClusters && !tIndex.m_sCluster.IsEmpty () )
sFullName.SetSprintf ("%s:%s", tIndex.m_sCluster.cstr(), tIndex.m_sName.cstr());
const CSphString& sName = ( bWithClusters && !tIndex.m_sCluster.IsEmpty () ) ? sFullName : tIndex.m_sName;
if ( !tSelector.Match ( sName.cstr() ) )
continue;
if ( tIndex.m_eType == IndexType_e::DISTR )
AddFederatedIndexStatusLine ( tFakeStats, sName, tOut );
else {
auto pServed = GetServed ( tIndex.m_sName );
if ( !pServed )
continue; // really rare case: the table was removed between GetAllServedIndexes and this point.
RIdx_c pIndex { pServed };
assert ( pIndex );
AddFederatedIndexStatusLine ( pIndex->GetStats(), sName, tOut );
}
}
tOut.Eof ();
}
void PutIndexStatus ( RowBuffer_i & tOut, const CSphIndex * pIndex )
{
tOut.PutString ( pIndex->GetFilebase () );
auto & tStats = pIndex->GetStats ();
tOut.PutNumAsString ( tStats.m_iTotalDocuments );
tOut.PutNumAsString ( tStats.m_iTotalBytes );
CSphIndexStatus tStatus;
pIndex->GetStatus ( &tStatus );
tOut.PutNumAsString ( tStatus.m_iRamUse );
tOut.PutNumAsString ( tStatus.m_iDiskUse );
tOut.PutNumAsString ( tStatus.m_iMapped );
tOut.PutNumAsString ( tStatus.m_iMappedResident );
tOut.PutNumAsString ( tStatus.m_iMappedDocs );
tOut.PutNumAsString ( tStatus.m_iMappedResidentDocs );
tOut.PutNumAsString ( tStatus.m_iMappedHits );
tOut.PutNumAsString ( tStatus.m_iMappedResidentHits );
tOut.PutNumAsString ( tStatus.m_iDead );
}
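// SELECT <table>.@status introspection: for an RT table emits one row per disk chunk,
// for anything else a single dummy chunk 0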
void HandleSelectIndexStatus ( RowBuffer_i & tOut, const SqlStmt_t * pStmt )
{
tOut.HeadBegin ();
tOut.HeadColumn ( "chunk_id", MYSQL_COL_LONG );
tOut.HeadColumn ( "base_name" );
tOut.HeadColumn ( "indexed_documents", MYSQL_COL_LONG );
tOut.HeadColumn ( "indexed_bytes", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "ram_bytes", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_bytes", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_mapped", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_mapped_cached", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_mapped_doclists", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_mapped_cached_doclists", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_mapped_hitlists", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "disk_mapped_cached_hitlists", MYSQL_COL_LONGLONG );
tOut.HeadColumn ( "killed_documents", MYSQL_COL_LONGLONG );
if ( !tOut.HeadEnd () )
return;
const auto & tStmt = *pStmt;
auto pServed = GetServed ( tStmt.m_sIndex );
if ( !ServedDesc_t::IsLocal ( pServed ) )
{
tOut.Error ( "select TABLE.@status requires an existing table" );
return;
}
RIdx_c pIndex { pServed };
if ( pIndex->IsRT () )
{
auto* pRtIndex = static_cast<const RtIndex_i*> ( pIndex.Ptr() );
int iChunk = 0;
bool bKeepIteration = true;
while ( bKeepIteration )
{
pRtIndex->ProcessDiskChunk (iChunk,[&bKeepIteration, &tOut] (const CSphIndex* pChunk) {
if ( !pChunk )
{
bKeepIteration = false;
return;
}
tOut.PutNumAsString ( pChunk->m_iChunk );
PutIndexStatus ( tOut, pChunk );
if ( !tOut.Commit () )
{
bKeepIteration = false;
return;
}
});
++iChunk;
}
} else {
tOut.PutNumAsString ( 0 ); // dummy 'chunk' of non-rt
PutIndexStatus ( tOut, pIndex );
tOut.Commit ();
}
tOut.Eof();
}
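// SHOW TABLE <name> SETTINGS: dumps the table settings as a single 'settings' row;
// for an RT table the settings of a specific disk chunk may be requested instead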
void HandleMysqlShowIndexSettings ( RowBuffer_i & tOut, const SqlStmt_t & tStmt )
{
CSphString sError;
auto pServed = GetServed ( tStmt.m_sIndex );
if ( !pServed )
{
tOut.Error ( "SHOW TABLE SETTINGS requires an existing table" );
return;
}
int iChunk = tStmt.m_iIntParam;
if ( tStmt.m_dIntSubkeys.GetLength ()>=1 )
iChunk = (int) tStmt.m_dIntSubkeys[0];
auto fnShowSettings = [&tOut, szStmt=tStmt.m_sStmt] ( const CSphIndex* pIndex )
{
if ( !pIndex )
{
tOut.Error ( "SHOW TABLE SETTINGS requires an existing table" );
return;
}
if ( !tOut.HeadOfStrings ( { "Variable_name", "Value" } ) )
return;
StringBuilder_c tBuf;
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = CreateFilenameBuilder ( pIndex->GetName () );
DumpSettings ( tBuf, *pIndex, pFilenameBuilder.get () );
tOut.DataTuplet ( "settings", tBuf.cstr () );
tOut.Eof ();
};
if ( iChunk >= 0 && pServed->m_eType == IndexType_e::RT )
RIdx_T<const RtIndex_i*> ( pServed )->ProcessDiskChunk ( iChunk, fnShowSettings );
else
fnShowSettings ( RIdx_c(pServed) );
}
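// SHOW PROFILE: one row per query state that was actually entered, carrying the time spent
// (sec.usec), the number of switches into the state and its share of the total time,
// followed by a summary 'total' row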
void HandleMysqlShowProfile ( RowBuffer_i & tOut, const QueryProfile_c & p, bool bMoreResultsFollow )
{
#define SPH_QUERY_STATE(_name,_desc) _desc,
static const char * dStates [ SPH_QSTATE_TOTAL ] = { SPH_QUERY_STATES };
#undef SPH_QUERY_STATE
tOut.HeadBegin ();
tOut.HeadColumn ( "Status" );
tOut.HeadColumn ( "Duration" );
tOut.HeadColumn ( "Switches" );
tOut.HeadColumn ( "Percent" );
tOut.HeadEnd ( bMoreResultsFollow );
int64_t tmTotal = 0;
int iCount = 0;
for ( int i=0; i<SPH_QSTATE_TOTAL; i++ )
{
if ( p.m_dSwitches[i]<=0 )
continue;
tmTotal += p.m_tmTotal[i];
iCount += p.m_dSwitches[i];
}
char sTime[32];
for ( int i=0; i<SPH_QSTATE_TOTAL; ++i )
{
if ( p.m_dSwitches[i]<=0 )
continue;
snprintf ( sTime, sizeof(sTime), "%d.%06d", int(p.m_tmTotal[i]/1000000), int(p.m_tmTotal[i]%1000000) );
tOut.PutString ( dStates[i] );
tOut.PutString ( sTime );
tOut.PutNumAsString ( p.m_dSwitches[i] );
if ( tmTotal )
tOut.PutFloatAsString ( 100.0f * p.m_tmTotal[i]/tmTotal, "%.2f" );
else
tOut.PutString ( "INF" );
if ( !tOut.Commit() )
return;
}
snprintf ( sTime, sizeof(sTime), "%d.%06d", int(tmTotal/1000000), int(tmTotal%1000000) );
tOut.PutString ( "total" );
tOut.PutString ( sTime );
tOut.PutNumAsString ( iCount );
tOut.PutString ( "0" );
tOut.Commit();
tOut.Eof ( bMoreResultsFollow );
}
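// ALTER TABLE ... ADD/MODIFY COLUMN worker. Note the restrictions enforced below: MODIFY
// only supports widening a row-wise int attribute to bigint; a string may exist as both
// a field and an attribute; an added attribute must not shadow an existing field
// of a non-string type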
static void AddAttrToIndex ( const SqlStmt_t & tStmt, CSphIndex * pIdx, CSphString & sError, bool bModify )
{
CSphString sAttrToAdd = tStmt.m_sAlterAttr;
sAttrToAdd.ToLower();
bool bIndexed = tStmt.m_uFieldFlags & CSphColumnInfo::FIELD_INDEXED;
bool bStored = tStmt.m_uFieldFlags & CSphColumnInfo::FIELD_STORED;
bool bAttribute = tStmt.m_uFieldFlags & CSphColumnInfo::FIELD_IS_ATTRIBUTE; // beware, may be true only for strings
auto pHasAttr = pIdx->GetMatchSchema ().GetAttr ( sAttrToAdd.cstr () );
bool bHasField = pIdx->GetMatchSchema ().GetFieldIndex ( sAttrToAdd.cstr () )!=-1;
const bool bInt2Bigint = pHasAttr
&& pHasAttr->m_eAttrType==SPH_ATTR_INTEGER
&& pHasAttr->m_eEngine==AttrEngine_e::DEFAULT
&& tStmt.m_eAlterColType==SPH_ATTR_BIGINT
&& tStmt.m_eEngine==AttrEngine_e::DEFAULT;
if ( !bIndexed && pHasAttr )
{
if ( !bModify || !bInt2Bigint )
{
sError.SetSprintf ( "'%s' attribute already in schema", sAttrToAdd.cstr () );
return;
}
}
if ( bModify )
{
if ( !pHasAttr )
{
sError.SetSprintf ( "attribute '%s' does not exist", sAttrToAdd.cstr() );
return;
}
if ( !bInt2Bigint )
{
sError.SetSprintf ( "attribute '%s': only alter from rowise int to bigint supported", sAttrToAdd.cstr () );
return;
}
}
if ( bIndexed && bHasField )
{
sError.SetSprintf ( "'%s' field already in schema", sAttrToAdd.cstr() );
return;
}
if ( !bIndexed && bHasField && tStmt.m_eAlterColType!=SPH_ATTR_STRING )
{
sError.SetSprintf ( "cannot add attribute that shadows '%s' field", sAttrToAdd.cstr () );
return;
}
AttrAddRemoveCtx_t tCtx;
tCtx.m_sName = sAttrToAdd;
tCtx.m_eType = tStmt.m_eAlterColType;
tCtx.m_iBits = tStmt.m_iBits;
tCtx.m_uFlags = tStmt.m_uAttrFlags;
tCtx.m_eEngine = tStmt.m_eEngine;
tCtx.m_tKNN = tStmt.m_tAlterKNN;
if ( bIndexed || bStored )
{
pIdx->AddRemoveField ( true, sAttrToAdd, tStmt.m_uFieldFlags, sError );
if ( bAttribute )
pIdx->AddRemoveAttribute ( true, tCtx, sError );
}
else
pIdx->AddRemoveAttribute ( true, tCtx, sError );
}
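// ALTER TABLE ... DROP COLUMN worker: refuses to drop built-in/internal attributes
// (e.g. the document id) and the last remaining attribute; when the name matches both
// an attribute and a field, both get removed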
static void RemoveAttrFromIndex ( const SqlStmt_t& tStmt, CSphIndex* pIdx, CSphString& sError )
{
CSphString sAttrToRemove = tStmt.m_sAlterAttr;
sAttrToRemove.ToLower();
auto pAttr = pIdx->GetMatchSchema().GetAttr ( sAttrToRemove.cstr() );
auto pField = pIdx->GetMatchSchema().GetField ( sAttrToRemove.cstr() );
if ( !pAttr && !pField )
{
sError.SetSprintf ( "attribute '%s' does not exist", sAttrToRemove.cstr() );
return;
}
if ( pAttr && ( sAttrToRemove==sphGetDocidName () || sphIsInternalAttr ( *pAttr ) ) )
{
sError.SetSprintf ( "unable to remove built-in attribute '%s'", sAttrToRemove.cstr() );
return;
}
if ( pAttr && pIdx->GetMatchSchema().GetAttrsCount()==1 )
{
sError.SetSprintf ( "unable to remove last attribute '%s'", sAttrToRemove.cstr() );
return;
}
if ( pAttr )
{
AttrAddRemoveCtx_t tCtx;
tCtx.m_sName = sAttrToRemove;
tCtx.m_eType = pAttr->m_eAttrType;
pIdx->AddRemoveAttribute ( false, tCtx, sError );
}
if ( pField )
pIdx->AddRemoveField ( false, sAttrToRemove, 0, sError );
}
enum class Alter_e
{
AddColumn,
DropColumn,
ModifyColumn,
RebuildSI,
};
static void HandleMysqlAlter ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, Alter_e eAction )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
MEMORY ( MEM_SQL_ALTER );
SearchFailuresLog_c dErrors;
CSphString sError;
if ( eAction==Alter_e::AddColumn && tStmt.m_eAlterColType==SPH_ATTR_NONE )
{
sError.SetSprintf ( "unsupported attribute type '%d'", tStmt.m_eAlterColType );
tOut.Error ( sError.cstr() );
return;
}
StrVec_t dNames;
ParseIndexList ( tStmt.m_sIndex, dNames );
if ( dNames.IsEmpty() )
{
sError.SetSprintf ( "no such table '%s'", tStmt.m_sIndex.cstr() );
tOut.Error ( sError.cstr() );
return;
}
for ( const auto & sName : dNames )
if ( !g_pLocalIndexes->Contains ( sName )
&& g_pDistIndexes->Contains ( sName ) )
{
sError.SetSprintf ( "ALTER is only supported for local (not distributed) tables" );
tOut.Error ( sError.cstr () );
return;
}
for ( const auto &sName : dNames )
{
auto pServed = GetServed ( sName );
if ( !pServed )
{
dErrors.Submit ( sName, nullptr, "unknown local table in ALTER request" );
continue;
}
// cluster does not implement ALTER for now
auto tCluster = IsPartOfCluster ( pServed );
if ( tCluster )
{
dErrors.SubmitEx ( sName, nullptr, "is part of cluster %s, ALTER is not supported for tables in cluster", tCluster->cstr() );
continue;
}
CSphString sAddError;
if ( eAction==Alter_e::AddColumn || eAction == Alter_e::ModifyColumn )
AddAttrToIndex ( tStmt, WIdx_c ( pServed ), sAddError, eAction == Alter_e::ModifyColumn );
else if ( eAction==Alter_e::DropColumn )
RemoveAttrFromIndex ( tStmt, WIdx_c ( pServed ), sAddError );
else if ( eAction==Alter_e::RebuildSI )
{
WIdx_c ( pServed )->AlterSI ( sAddError );
}
if ( !sAddError.IsEmpty() )
dErrors.Submit ( sName, nullptr, sAddError.cstr() );
}
if ( !dErrors.IsEmpty() )
{
StringBuilder_c sReport;
dErrors.BuildReport ( sReport );
tOut.Error ( sReport.cstr() );
return;
}
tOut.Ok();
}
static bool PrepareReconfigure ( const char * szIndex, const CSphConfigSection & hIndex, CSphReconfigureSettings & tSettings, StrVec_t * pWarnings, CSphString & sError )
{
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = CreateFilenameBuilder ( szIndex );
{
CSphString sWarning;
tSettings.m_tTokenizer.Setup ( hIndex, sWarning );
tSettings.m_tDict.Setup ( hIndex, pFilenameBuilder.get(), sWarning );
tSettings.m_tFieldFilter.Setup ( hIndex, sWarning );
tSettings.m_tMutableSettings.Load ( hIndex, false, nullptr );
if ( pWarnings && !sWarning.IsEmpty() )
pWarnings->Add(sWarning);
}
if ( !sphRTSchemaConfigure ( hIndex, tSettings.m_tSchema, tSettings.m_tIndex, pWarnings, sError, !tSettings.m_bChangeSchema, false ) )
{
sError.SetSprintf ( "failed to parse table '%s' schema, error: '%s'", szIndex, sError.cstr() );
return false;
}
{
CSphString sWarning;
if ( !tSettings.m_tIndex.Setup ( hIndex, szIndex, sWarning, sError ) )
{
sError.SetSprintf ( "failed to parse table '%s' settings, error: '%s'", szIndex, sError.cstr() );
return false;
}
if ( pWarnings && !sWarning.IsEmpty() )
pWarnings->Add(sWarning);
}
tSettings.m_tSchema.SetupFlags ( tSettings.m_tIndex, false, nullptr );
return CheckStoredFields ( tSettings.m_tSchema, tSettings.m_tIndex, sError );
}
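// convenience overload: re-reads the config file from disk, locates the table's section
// and forwards it to the worker above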
static bool PrepareReconfigure ( const char * szIndex, CSphReconfigureSettings & tSettings, StrVec_t * pWarnings, CSphString & sError )
{
CSphConfig hCfg;
auto [bChanged, dConfig] = FetchAndCheckIfChanged ( g_sConfigFile );
if ( !ParseConfig ( &hCfg, g_sConfigFile, dConfig ) )
{
sError.SetSprintf ( "failed to parse config file '%s': %s; using previous settings", g_sConfigFile.cstr (), TlsMsg::szError() );
return false;
}
if ( !hCfg.Exists ( "index" ) )
{
sError.SetSprintf ( "failed to find any table in config file '%s'; using previous settings", g_sConfigFile.cstr () );
return false;
}
if ( !hCfg["index"].Exists ( szIndex ) )
{
sError.SetSprintf ( "failed to find table '%s' in config file '%s'; using previous settings", szIndex, g_sConfigFile.cstr () );
return false;
}
return PrepareReconfigure ( szIndex, hCfg["index"][szIndex], tSettings, pWarnings, sError );
}
// ALTER RTINDEX/TABLE <idx> RECONFIGURE
static void HandleMysqlReconfigure ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
MEMORY ( MEM_SQL_ALTER );
if ( IsConfigless() )
{
tOut.Error ( "ALTER RECONFIGURE is not supported in RT mode" );
return;
}
const char * szIndex = tStmt.m_sIndex.cstr();
auto pServed = GetServed ( tStmt.m_sIndex );
if ( !ServedDesc_t::IsMutable ( pServed ) )
{
tOut.ErrorEx ( "'%s' is absent, or does not support ALTER", szIndex );
return;
}
CSphString sError;
StrVec_t dWarnings;
CSphReconfigureSettings tSettings;
CSphReconfigureSetup tSetup;
if ( !PrepareReconfigure ( szIndex, tSettings, &dWarnings, sError ) )
{
tOut.Error ( sError.cstr () );
return;
}
WIdx_T<RtIndex_i*> pRT { pServed };
if ( !pRT->IsSameSettings ( tSettings, tSetup, dWarnings, sError ) && sError.IsEmpty() )
{
if ( !pRT->Reconfigure ( tSetup ) )
{
sError.SetSprintf ( "table '%s': reconfigure failed; TABLE UNUSABLE (%s)", tStmt.m_sIndex.cstr(), pRT->GetLastError().cstr() );
g_pLocalIndexes->Delete ( tStmt.m_sIndex );
}
}
sWarning = ConcatWarnings ( dWarnings );
if ( sError.IsEmpty() )
tOut.Ok ( 0, dWarnings.GetLength() );
else
tOut.Error ( sError.cstr() );
}
static bool ApplyIndexKillList ( const CSphIndex * pIndex, CSphString & sWarning, CSphString & sError, bool bShowMessage = false );
// STMT_ALTER_KLIST_TARGET: ALTER TABLE index KILLLIST_TARGET = 'string'
static void HandleMysqlAlterKlist ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
MEMORY ( MEM_SQL_ALTER );
CSphString sError;
KillListTargets_c tNewTargets;
if ( !tNewTargets.Parse ( tStmt.m_sAlterOption, tStmt.m_sIndex.cstr(), sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
auto pServed = GetServed ( tStmt.m_sIndex.cstr () );
if ( !pServed )
{
if ( g_pDistIndexes->Contains ( tStmt.m_sIndex ) )
sError.SetSprintf ( "ALTER is only supported for local (not distributed) tables" );
else
sError.SetSprintf ( "table '%s' not found", tStmt.m_sIndex.cstr () );
}
else if ( ServedDesc_t::IsMutable ( pServed ) )
sError.SetSprintf ( "'%s' does not support ALTER (real-time or percolate)", tStmt.m_sIndex.cstr () );
if ( !sError.IsEmpty () )
{
tOut.Error ( sError.cstr () );
return;
}
WIdx_c pIdx { pServed };
if ( !pIdx->AlterKillListTarget ( tNewTargets, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
// apply killlist to new targets
if ( !ApplyIndexKillList ( pIdx, sWarning, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
if ( sError.IsEmpty() )
tOut.Ok();
else
tOut.Error ( sError.cstr() );
}
// remove all old files that are not in the list of current index files
static void RemoveOutdatedFiles ( RtIndex_i * pRtIndex, StrVec_t & dOldFiles )
{
StrVec_t dNewFiles;
pRtIndex->GetIndexFiles ( dNewFiles, dNewFiles );
dOldFiles.Uniq();
sph::StringSet hNewFiles ( dNewFiles );
for ( const CSphString & tOldName : dOldFiles )
{
if ( hNewFiles[tOldName] )
continue;
if ( sphIsReadable ( tOldName ) )
::unlink ( tOldName.cstr() );
}
}
// STMT_ALTER_INDEX_SETTINGS: ALTER TABLE index [ident = 'string']*
static void HandleMysqlAlterIndexSettings ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
MEMORY ( MEM_SQL_ALTER );
CSphString sError;
if ( !IsConfigless() )
{
sError = "ALTER TABLE requires data_dir to be set in the config file";
tOut.Error ( sError.cstr() );
return;
}
auto pServed = GetServed ( tStmt.m_sIndex.cstr() );
if ( !pServed || pServed->m_eType != IndexType_e::RT )
{
tOut.ErrorEx ( "table '%s' is not found, or not real-time", tStmt.m_sIndex.cstr() );
return;
}
// cluster does not implement ALTER for now
auto tCluster = IsPartOfCluster ( pServed );
if ( tCluster )
{
tOut.ErrorEx ( "table '%s' is part of cluster %s, ALTER is not supported for tables in cluster", tStmt.m_sIndex.cstr(), tCluster->cstr() );
return;
}
WIdx_T<RtIndex_i*> pRtIndex { pServed };
// get all table settings as a string
CSphString sCreateTable = BuildCreateTable ( pRtIndex->GetName(), pRtIndex, pRtIndex->GetInternalSchema() );
CSphVector<SqlStmt_t> dCreateTableStmts;
if ( !ParseDdl ( FromStr ( sCreateTable ), dCreateTableStmts, sError ) )
{
tOut.Error ( sError.cstr() );
return;
}
if ( dCreateTableStmts.GetLength()!=1 )
{
tOut.Error ( "Unable to alter table settings" );
return;
}
int iSuffix = pRtIndex->GetChunkId();
CSphString sIndexPath = GetPathOnly ( pRtIndex->GetFilebase() );
// parse the options string to old-style config hash
std::unique_ptr<IndexSettingsContainer_i> pContainer { CreateIndexSettingsContainer() };
pContainer->Populate ( dCreateTableStmts[0].m_tCreateTable, false );
// force override of old options: options from ALTER should override the current options
for ( const auto & i : tStmt.m_tCreateTable.m_dOpts )
{
pContainer->RemoveKeys ( i.m_sName );
}
// we should be able to remove a setting via an empty option value, or to drop the previous options via a trailing empty option
for ( const auto & i : tStmt.m_tCreateTable.m_dOpts )
{
pContainer->AddOption ( i.m_sName, i.m_sValue, true );
}
if ( !pContainer->CheckPaths() )
{
tOut.Error ( pContainer->GetError().cstr() );
return;
}
// keep list of index files prior to alter
StrVec_t dOldFiles;
pRtIndex->GetIndexFiles ( dOldFiles, dOldFiles );
if ( !pContainer->CopyExternalFiles ( sIndexPath, iSuffix ) )
{
tOut.Error ( pContainer->GetError().cstr () );
return;
}
StrVec_t dWarnings;
CSphReconfigureSettings tSettings;
if ( !PrepareReconfigure ( tStmt.m_sIndex.cstr(), pContainer->AsCfg(), tSettings, &dWarnings, sError ) )
{
tOut.Error ( sError.cstr () );
return;
}
CSphReconfigureSetup tSetup;
bool bSame = pRtIndex->IsSameSettings ( tSettings, tSetup, dWarnings, sError );
sWarning = ConcatWarnings(dWarnings);
if ( !bSame && sError.IsEmpty() )
{
bool bOk = pRtIndex->Reconfigure(tSetup);
if ( !bOk )
{
sError.SetSprintf ( "table '%s': alter failed; TABLE UNUSABLE (%s)", tStmt.m_sIndex.cstr(), pRtIndex->GetLastError().cstr() );
g_pLocalIndexes->Delete ( tStmt.m_sIndex );
}
}
if ( sError.IsEmpty() )
{
// all ok, delete old files
RemoveOutdatedFiles ( pRtIndex, dOldFiles );
pContainer->ResetCleanup();
tOut.Ok ( 0, dWarnings.GetLength() );
}
else
tOut.Error ( sError.cstr() );
}
// STMT_SHOW_PLAN: SHOW PLAN
static void HandleMysqlShowPlan ( RowBuffer_i & tOut, const QueryProfile_c & p, bool bMoreResultsFollow, bool bDot )
{
tOut.HeadBegin ();
tOut.HeadColumn ( "Variable" );
tOut.HeadColumn ( "Value" );
tOut.HeadEnd ( bMoreResultsFollow );
tOut.PutString ( "transformed_tree" );
StringBuilder_c sPlan;
sph::RenderBsonPlan ( sPlan, bson::MakeHandle ( p.m_dPlan ), bDot );
tOut.PutString ( sPlan );
tOut.Commit();
tOut.Eof ( bMoreResultsFollow );
}
// for seamless rotation we create a new index and copy its settings from the previous definition. The indexes are NOT linked.
// for greedy rotation we just make a light clone (the original index is add-reffed).
ServedIndexRefPtr_c MakeCloneForRotation ( const cServedIndexRefPtr_c& pSource, const CSphString& sIndex )
{
assert ( pSource->m_eType == IndexType_e::PLAIN );
auto pRes = MakeServedIndex();
LightClone ( pRes, pSource );
if ( g_bSeamlessRotate )
{
pRes->SetStatsFrom ( *pSource );
auto pIdx = sphCreateIndexPhrase ( sIndex, pRes->m_sIndexPath );
pIdx->m_iExpansionLimit = g_iExpansionLimit;
pIdx->SetMutableSettings ( pRes->m_tSettings );
pIdx->SetGlobalIDFPath ( pRes->m_sGlobalIDFPath );
pIdx->SetCacheSize ( g_iMaxCachedDocs, g_iMaxCachedHits );
pRes->SetIdx ( std::move ( pIdx ) );
} else
pRes->SetIdxAndStatsFrom ( *pSource );
return pRes;
}
bool LockIndex ( const ServedIndex_c& tIdx, CSphIndex* pIdx, CSphString& sError )
{
if ( !g_bOptNoLock && !pIdx->Lock() )
{
sError.SetSprintf ( "lock: %s", pIdx->GetLastError().cstr() );
return false;
}
tIdx.UpdateMass();
return true;
}
static bool LimitedRotateIndexMT ( ServedIndexRefPtr_c& pNewServed, const CSphString& sIndex, StrVec_t& dWarnings, CSphString& sError ) EXCLUDES ( MainThread );
static bool RotateIndexGreedy ( const ServedIndex_c& tServed, const char* szIndex, CSphString& sError ) REQUIRES ( tServed.m_pIndex->Locker() );
static bool SwitchoverIndexSeamless ( const cServedIndexRefPtr_c& pServed, const char* szIndex, const CSphString& sBase, const CSphString& sNewPath, StrVec_t& dWarnings, CSphString& sError ) EXCLUDES ( MainThread );
static bool SwitchoverIndexGreedy ( CSphIndex* pIdx, const char* szIndex, const CSphString& sBase, const CSphString& sNewPath, StrVec_t& dWarnings, CSphString& sError ) EXCLUDES ( MainThread );
static void HandleMysqlReloadIndex ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
// preflight check
cServedIndexRefPtr_c pServed = GetServed ( tStmt.m_sIndex );
if ( !pServed )
return tOut.ErrorEx ( "unknown local table '%s'", tStmt.m_sIndex.cstr() );
if ( ServedDesc_t::IsMutable ( pServed ) )
return tOut.Error ( "can not reload real-time or percolate table" );
assert ( pServed->m_eType == IndexType_e::PLAIN );
CSphString sError;
StrVec_t dWarnings;
AT_SCOPE_EXIT ( [&sWarning, &dWarnings]() {
if ( dWarnings.IsEmpty() )
return;
StringBuilder_c sWarn ( "; " );
dWarnings.for_each ( [&sWarn] ( const auto& i ) { sWarn << i; } );
sWarn.MoveTo ( sWarning );
});
if ( tStmt.m_iIntParam == 1 )
{
if ( tStmt.m_sStringParam.IsEmpty() )
return tOut.Error ( "reload with switchover requires explicit new path to the index" );
// here switchover=1 logic goes...
if ( g_bSeamlessRotate )
{
if ( !SwitchoverIndexSeamless ( pServed, tStmt.m_sIndex.cstr(), pServed->m_sIndexPath, tStmt.m_sStringParam, dWarnings, sError ) )
g_pLocalIndexes->Delete ( tStmt.m_sIndex ); // since it unusable - no sense just to disable it.
} else
{
WIdx_c WLock { pServed };
CSphIndex* pIdx = UnlockedHazardIdxFromServed ( *pServed );
if ( !SwitchoverIndexGreedy ( pIdx, tStmt.m_sIndex.cstr(), pServed->m_sIndexPath, tStmt.m_sStringParam, dWarnings, sError ) )
g_pLocalIndexes->Delete ( tStmt.m_sIndex ); // since it unusable - no sense just to disable it.
// fixme! SwitchoverIndexGreedy does prealloc. Do we need to perform/signal preload also?
else
LockIndex ( *pServed, pIdx, sError );
}
if ( sError.IsEmpty() )
return tOut.Ok();
sphWarning ( "%s", sError.cstr() );
return tOut.Error ( sError.cstr() );
}
assert ( tStmt.m_iIntParam!=1 );
if ( !tStmt.m_sStringParam.IsEmpty () )
{
// try to move the files from an arbitrary path to the current index path before rotating, if needed.
// fixme! what about concurrency? if 2 sessions simultaneously ask to rotate,
// or if there are unapplied rotates from the indexer - it seems the .new files may get clobbered?
IndexFiles_c sIndexFiles ( pServed->m_sIndexPath );
if ( !sIndexFiles.RelocateToNew ( tStmt.m_sStringParam ) )
return tOut.Error ( sIndexFiles.ErrorMsg () );
}
if ( g_bSeamlessRotate )
{
ServedIndexRefPtr_c pNewServed = MakeCloneForRotation ( pServed, tStmt.m_sIndex );
if ( !LimitedRotateIndexMT ( pNewServed, tStmt.m_sIndex, dWarnings, sError ) )
{
sphWarning ( "%s", sError.cstr() );
return tOut.Error ( sError.cstr() );
}
} else {
WIdx_c WLock { pServed };
if ( !RotateIndexGreedy ( *pServed, tStmt.m_sIndex.cstr(), sError ) )
{
sphWarning ( "%s", sError.cstr() );
tOut.Error ( sError.cstr() );
g_pLocalIndexes->Delete ( tStmt.m_sIndex ); // since it unusable - no sense just to disable it.
// fixme! RotateIndexGreedy does prealloc. Do we need to perform/signal preload also?
return;
}
}
tOut.Ok();
}
void HandleMysqlExplain ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, bool bDot )
{
CSphString sProc ( tStmt.m_sCallProc );
if ( sProc.ToLower()!="query" )
{
tOut.ErrorEx ( "no such explain procedure %s", tStmt.m_sCallProc.cstr () );
return;
}
auto pServed = GetServed ( tStmt.m_sIndex );
if ( !pServed )
{
tOut.ErrorEx ( "unknown local table '%s'", tStmt.m_sIndex.cstr ());
return;
}
TlsMsg::ResetErr (); // reset error
auto dPlan = RIdx_c ( pServed )->ExplainQuery ( tStmt.m_tQuery.m_sQuery );
if ( TlsMsg::HasErr ())
{
tOut.Error ( TlsMsg::szError ());
return;
}
StringBuilder_c sRes;
sph::RenderBsonPlan ( sRes, bson::MakeHandle ( dPlan ), bDot );
tOut.HeadBegin ();
tOut.HeadColumn ( "Variable" );
tOut.HeadColumn ( "Value" );
tOut.HeadEnd ();
tOut.PutString ( "transformed_tree" );
tOut.PutString ( sRes );
tOut.Commit();
tOut.Eof ();
}
void HandleMysqlImportTable ( RowBuffer_i & tOut, const SqlStmt_t & tStmt, CSphString & sWarning )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
CSphString sError;
if ( !IsConfigless() )
{
sError = "IMPORT TABLE requires data_dir to be set in the config file";
tOut.Error ( sError.cstr() );
return;
}
if ( IndexIsServed ( tStmt.m_sIndex ) )
{
sError.SetSprintf ( "table '%s' already exists", tStmt.m_sIndex.cstr() );
tOut.Error ( sError.cstr() );
return;
}
bool bPQ = false;
StrVec_t dWarnings;
if ( !CopyIndexFiles ( tStmt.m_sIndex, tStmt.m_sStringParam, bPQ, dWarnings, sError ) )
{
sError.SetSprintf ( "unable to import table '%s': %s", tStmt.m_sIndex.cstr(), sError.cstr() );
tOut.Error ( sError.cstr() );
return;
}
if ( !AddExistingIndexConfigless ( tStmt.m_sIndex, bPQ ? IndexType_e::PERCOLATE : IndexType_e::RT, dWarnings, sError ) )
{
sError.SetSprintf ( "unable to import table '%s': %s", tStmt.m_sIndex.cstr(), sError.cstr() );
tOut.Error ( sError.cstr() );
return;
}
if ( dWarnings.GetLength() )
{
StringBuilder_c sWarn ( "; " );
for ( const auto & i : dWarnings )
sWarn << i;
sWarning = sWarn.cstr();
}
tOut.Ok();
}
//////////////////////////////////////////////////////////////////////////
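// FREEZE <tables> [LIKE 'pattern']: locks the file state of the given RT/percolate tables
// and returns the list of their files with normalized paths (e.g. so they can be safely
// copied by a backup tool); tables that can't be frozen are reported in a warning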
void HandleMysqlFreezeIndexes ( RowBuffer_i& tOut, const SqlStmt_t& tStmt, CSphString& sWarningOut )
{
// search through specified local indexes
StrVec_t dIndexes, dNonlockedIndexes, dIndexFiles;
ParseIndexList ( tStmt.m_sIndex, dIndexes );
for ( const auto& sIndex : dIndexes )
{
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
dNonlockedIndexes.Add ( sIndex );
continue;
}
RIdx_T<RtIndex_i*> pRt { pIndex };
pRt->LockFileState ( dIndexFiles );
}
int iWarnings=0;
if ( !dNonlockedIndexes.IsEmpty() )
{
StringBuilder_c sWarning;
sWarning << "Some tables are not suitable for freezing: ";
sWarning.StartBlock();
dNonlockedIndexes.for_each ( [&sWarning] ( const auto& sValue ) { sWarning << sValue; } );
sWarning.FinishBlocks ();
sWarning.MoveTo ( sWarningOut );
++iWarnings;
}
CheckLike tSelector { tStmt.m_sStringParam.cstr() };
tOut.HeadBegin ();
tOut.HeadColumn ( "file" );
tOut.HeadColumn ( "normalized" );
tOut.HeadEnd();
for ( const auto& sFile : dIndexFiles )
{
if ( !tSelector.Match ( sFile.cstr() ) )
continue;
tOut.PutString ( sFile );
tOut.PutString ( RealPath ( sFile ) );
if ( !tOut.Commit() )
return;
};
tOut.Eof ( false, iWarnings );
}
void HandleMysqlUnfreezeIndexes ( RowBuffer_i& tOut, const CSphString& sIndexes )
{
// search through specified local indexes
StrVec_t dIndexes;
int iUnlocked=0;
ParseIndexList ( sIndexes, dIndexes );
for ( const auto& sIndex : dIndexes )
{
auto pServed = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pServed ) )
continue;
// here we take a non-locked instance to avoid a deadlock with update, that is:
// an update may acquire the w-lock and then wait until the index is unfrozen to continue,
// but we couldn't unfreeze it if we needed that very lock to do so.
auto * pRt = static_cast<RtIndex_i *> ( UnlockedHazardIdxFromServed ( *pServed ) );
if ( !pRt )
continue;
pRt->EnableSave ();
++iUnlocked;
}
tOut.Ok ( iUnlocked );
}
void HandleMysqlKill ( RowBuffer_i& tOut, int iKill )
{
int iKilled = 0;
IterateTasks ( [&iKilled, iKill] ( ClientTaskInfo_t* pTask ) {
if ( pTask && pTask->GetConnID() == iKill && !pTask->GetKilled())
{
pTask->SetKilled(true);
++iKilled;
}
} );
if ( !iKilled )
{
tOut.ErrorEx ( SphSprintf ( "Unknown connection id: %d", iKill ).cstr(), EMYSQL_ERR::NO_SUCH_THREAD );
} else
{
tOut.Ok ( iKilled );
}
}
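// SHOW LOCKS: lists the RT/percolate tables that are currently frozen,
// together with their freeze lock counts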
void HandleMysqlShowLocks ( RowBuffer_i & tOut )
{
tOut.HeadBegin ();
tOut.HeadColumn ( "Type" );
tOut.HeadColumn ( "Name" );
tOut.HeadColumn ( "Lock Type" );
tOut.HeadColumn ( "Additional Info" );
if ( !tOut.HeadEnd () )
return;
// collect local, rt, percolate
auto dIndexes = GetAllServedIndexes ();
for ( auto & dPair: dIndexes )
{
switch ( dPair.m_eType )
{
case IndexType_e::RT:
case IndexType_e::PERCOLATE:
{
auto pIndex = GetServed ( dPair.m_sName );
assert ( ServedDesc_t::IsMutable ( pIndex ) );
RIdx_T<RtIndex_i *> pRt { pIndex };
int iLocks = pRt->GetNumOfLocks ();
if ( iLocks>0 )
{
tOut.PutString ( GetIndexTypeName ( dPair.m_eType ) );
tOut.PutString ( dPair.m_sName );
tOut.PutString ( "freeze" );
tOut.PutStringf ( "Count: %d", iLocks );
if ( !tOut.Commit () )
return;
}
}
default:
break;
}
}
tOut.Eof ( false );
}
RtAccum_t* CSphSessionAccum::GetAcc ( RtIndex_i* pIndex, CSphString& sError )
{
assert ( pIndex );
if ( !m_tAcc )
m_tAcc.emplace();
if ( !pIndex->BindAccum ( &m_tAcc.value(), &sError ) )
return nullptr;
return &m_tAcc.value();
}
RtAccum_t* CSphSessionAccum::GetAcc()
{
if ( !m_tAcc )
m_tAcc.emplace();
return &m_tAcc.value();
}
RtIndex_i * CSphSessionAccum::GetIndex ()
{
if ( !m_tAcc )
return nullptr;
return m_tAcc->GetIndex();
}
static bool FixupFederatedQuery ( ESphCollation eCollation, CSphVector<SqlStmt_t> & dStmt, CSphString & sError, CSphString & sFederatedQuery );
static const CSphString g_sLogDoneStmt = "/* DONE */";
static const Str_t g_tLogDoneStmt = FromStr ( g_sLogDoneStmt );
struct LogStmtGuard_t
{
LogStmtGuard_t ( const Str_t & sQuery, SqlStmt_e eStmt, bool bMulti )
{
m_tmStarted = LogFilterStatementSphinxql ( sQuery, eStmt );
m_bLogDone = ( m_tmStarted && eStmt!=STMT_UPDATE && eStmt!=STMT_SELECT && !bMulti ); // update and select will log differently
}
~LogStmtGuard_t ()
{
if ( m_bLogDone )
{
int64_t tmDelta = sphMicroTimer() - m_tmStarted;
LogStatementSphinxql ( g_tLogDoneStmt, (int)( tmDelta / 1000 ) );
}
}
int64_t m_tmStarted = 0;
bool m_bLogDone = false;
};
void ClientSession_c::FreezeLastMeta()
{
m_tLastMeta = CSphQueryResultMeta();
m_tLastMeta.m_sError = m_sError;
m_tLastMeta.m_sWarning = "";
}
static void HandleMysqlShowSettings ( const CSphConfig & hConf, RowBuffer_i & tOut );
// just execute one sphinxql statement
//
// IMPORTANT! this does NOT start or stop profiling, as there are a few external
// things (client net reads and writes) that we want to profile, too
//
// returns true if the current profile should be kept (default)
// returns false if profile should be discarded (eg. SHOW PROFILE case)
bool ClientSession_c::Execute ( Str_t sQuery, RowBuffer_i & tOut )
{
auto& tSess = session::Info();
// set on query guard
tSess.SetTaskState ( TaskState_e::QUERY );
auto& tCrashQuery = GlobalCrashQueryGetRef();
tCrashQuery.m_eType = QUERY_SQL;
tCrashQuery.m_dQuery = { (const BYTE*) sQuery.first, sQuery.second };
// parse SQL query
if ( tSess.IsProfile() )
m_tProfile.Switch ( SPH_QSTATE_SQL_PARSE );
m_sError = "";
CSphVector<SqlStmt_t> dStmt;
bool bParsedOK = sphParseSqlQuery ( sQuery, dStmt, m_sError, tSess.GetCollation () );
if ( tSess.IsProfile() )
m_tProfile.Switch ( SPH_QSTATE_UNKNOWN );
SqlStmt_e eStmt = STMT_PARSE_ERROR;
if ( bParsedOK )
{
eStmt = dStmt[0].m_eStmt;
dStmt[0].m_sStmt = sQuery.first;
}
const SqlStmt_e ePrevStmt = m_eLastStmt;
if ( eStmt!=STMT_SHOW_META )
m_eLastStmt = eStmt;
SqlStmt_t * pStmt = dStmt.Begin();
assert ( !bParsedOK || pStmt );
myinfo::SetCommand ( g_dSqlStmts[eStmt] );
AT_SCOPE_EXIT ( []() { myinfo::SetCommandDone(); } );
LogStmtGuard_t tLogGuard ( sQuery, eStmt, dStmt.GetLength()>1 );
if ( bParsedOK && m_bFederatedUser )
{
if ( !FixupFederatedQuery ( tSess.GetCollation (), dStmt, m_sError, m_sFederatedQuery ) )
{
FreezeLastMeta();
tOut.Error ( m_sError.cstr() );
return true;
}
}
// handle multi SQL query
if ( bParsedOK && dStmt.GetLength()>1 )
{
m_sError = "";
HandleMysqlMultiStmt ( dStmt, m_tLastMeta, tOut, m_sError );
return true; // FIXME? how does this work with profiling?
}
// handle SQL query
switch ( eStmt )
{
case STMT_PARSE_ERROR:
if ( m_sError.IsEmpty() )
tOut.Ok ( 0 );
else {
FreezeLastMeta();
tOut.Error ( m_sError.cstr() );
}
return true;
case STMT_SELECT:
{
MEMORY ( MEM_SQL_SELECT );
if ( ClusterFlavour () )
{
auto dParts = sphSplit ( pStmt->m_sIndex.cstr (), ":" );
if ( dParts.GetLength ()>1 )
{
pStmt->m_sCluster = dParts[0];
pStmt->m_tQuery.m_sIndexes = pStmt->m_sIndex = dParts[1];
}
}
StatCountCommand ( SEARCHD_COMMAND_SEARCH );
auto tmStart = sphMicroTimer();
SearchHandler_c tHandler ( 1, sphCreatePlainQueryParser(), QUERY_SQL, true );
// don't log search queries coming from the buddy at 'info' log verbosity
if ( session::IsQueryLogDisabled() )
dStmt.Begin()->m_tQuery.m_uDebugFlags |= QUERY_DEBUG_NO_LOG;
tHandler.SetQuery ( 0, dStmt.Begin()->m_tQuery, std::move ( dStmt.Begin()->m_pTableFunc ) );
tHandler.m_pStmt = pStmt;
if ( tSess.IsProfile() )
tHandler.SetProfile ( &m_tProfile );
if ( m_bFederatedUser )
tHandler.SetFederatedUser();
tOut.SomethingWasSent();
if ( HandleMysqlSelect ( tOut, tHandler ) && !tOut.SomethingWasSent() )
{
// query just completed ok; reset our error message
m_sError = "";
AggrResult_t & tLast = tHandler.m_dAggrResults.Last();
auto uMatches = SendMysqlSelectResult ( tOut, tLast, false, m_bFederatedUser, &m_sFederatedQuery, ( tSess.IsProfile() ? &m_tProfile : nullptr ) );
StatCountCommandDetails ( SearchdStats_t::eSearch, uMatches, tmStart );
}
// save meta for SHOW META (profile is saved elsewhere)
m_tLastMeta = tHandler.m_dAggrResults.Last();
return true;
}
case STMT_SHOW_WARNINGS:
HandleMysqlWarning ( m_tLastMeta, tOut, false );
return true;
case STMT_SHOW_STATUS:
case STMT_SHOW_AGENT_STATUS:
if ( eStmt==STMT_SHOW_STATUS )
{
StatCountCommand ( SEARCHD_COMMAND_STATUS );
}
HandleMysqlStatus ( tOut, *pStmt, false );
return true;
case STMT_SHOW_META:
if ( ePrevStmt!=STMT_CALL )
HandleMysqlMeta ( tOut, *pStmt, m_tLastMeta, false );
else
HandleMysqlPercolateMeta ( m_tPercolateMeta, m_tLastMeta.m_sWarning, tOut );
return true;
case STMT_INSERT:
case STMT_REPLACE:
{
StmtErrorReporter_c tErrorReporter ( tOut );
MaybeFixupIndexNameFromMysqldump ( *pStmt );
sphHandleMysqlInsert ( tErrorReporter, *pStmt );
return true;
}
case STMT_DELETE:
{
StmtErrorReporter_c tErrorReporter ( tOut );
sphHandleMysqlDelete ( tErrorReporter, *pStmt, sQuery );
return true;
}
case STMT_SET:
StatCountCommand ( SEARCHD_COMMAND_UVAR );
HandleMysqlSet ( tOut, *pStmt, m_tAcc );
return false;
case STMT_BEGIN:
{
StmtErrorReporter_c tErrorReporter ( tOut );
sphHandleMysqlBegin ( tErrorReporter, sQuery );
return true;
}
case STMT_COMMIT:
case STMT_ROLLBACK:
{
StmtErrorReporter_c tErrorReporter ( tOut );
sphHandleMysqlCommitRollback ( tErrorReporter, sQuery, eStmt==STMT_COMMIT );
return true;
}
case STMT_CALL:
// IMPORTANT! if you add a new builtin here, also add it
// to the comment on the STMT_CALL line in the SqlStmt_e declaration,
// the one that lists expansions for doc/check.pl
pStmt->m_sCallProc.ToUpper();
if ( pStmt->m_sCallProc=="SNIPPETS" )
HandleMysqlCallSnippets ( tOut, *pStmt );
else if ( pStmt->m_sCallProc=="KEYWORDS" )
HandleMysqlCallKeywords ( tOut, *pStmt, m_tLastMeta.m_sWarning );
else if ( pStmt->m_sCallProc=="SUGGEST" )
HandleMysqlCallSuggest ( tOut, *pStmt, false );
else if ( pStmt->m_sCallProc=="QSUGGEST" )
HandleMysqlCallSuggest ( tOut, *pStmt, true );
else if ( pStmt->m_sCallProc=="PQ" )
{
HandleMysqlCallPQ ( tOut, *pStmt, m_tAcc, m_tPercolateMeta );
m_tPercolateMeta.m_dResult.m_sMessages.MoveWarningsTo ( m_tLastMeta.m_sWarning );
m_tPercolateMeta.m_dDocids.Reset ( 0 ); // free occupied mem
} else
{
m_sError.SetSprintf ( "no such built-in procedure %s", pStmt->m_sCallProc.cstr() );
tOut.Error ( m_sError.cstr() );
}
return true;
case STMT_DESCRIBE:
HandleMysqlDescribe ( tOut, pStmt );
return true;
case STMT_SHOW_TABLES:
HandleMysqlShowTables ( tOut, pStmt );
return true;
case STMT_CREATE_TABLE:
m_tLastMeta = CSphQueryResultMeta();
HandleMysqlCreateTable ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_CREATE_TABLE_LIKE:
m_tLastMeta = CSphQueryResultMeta();
HandleMysqlCreateTableLike ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_DROP_TABLE:
m_tLastMeta.m_sWarning = "";
HandleMysqlDropTable ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_SHOW_CREATE_TABLE:
HandleMysqlShowCreateTable ( tOut, *pStmt );
return true;
case STMT_UPDATE:
{
StmtErrorReporter_c tErrorReporter ( tOut );
sphHandleMysqlUpdate ( tErrorReporter, *pStmt, sQuery );
return true;
}
case STMT_DUMMY:
if ( !pStmt->m_dInsertSchema.IsEmpty() )
{
// empty with schema (something like 'show triggers' expects schema even if there are no results)
tOut.HeadOfStrings ( pStmt->m_dInsertSchema );
tOut.Eof();
return true;
}
tOut.Ok();
return true;
case STMT_CREATE_FUNCTION:
if ( !sphPluginCreate ( pStmt->m_sUdfLib.cstr(), PLUGIN_FUNCTION, pStmt->m_sUdfName.cstr(), pStmt->m_eUdfType, m_sError ) )
tOut.Error ( m_sError.cstr() );
else
tOut.Ok();
SphinxqlStateFlush ();
return true;
case STMT_DROP_FUNCTION:
if ( !sphPluginDrop ( PLUGIN_FUNCTION, pStmt->m_sUdfName.cstr(), m_sError ) )
tOut.Error ( m_sError.cstr() );
else
tOut.Ok();
SphinxqlStateFlush ();
return true;
case STMT_CREATE_PLUGIN:
case STMT_DROP_PLUGIN:
{
// convert plugin type string to enum
PluginType_e eType = sphPluginGetType ( pStmt->m_sStringParam );
if ( eType==PLUGIN_TOTAL )
{
tOut.ErrorEx ( "unknown plugin type '%s'", pStmt->m_sStringParam.cstr() );
break;
}
// action!
bool bRes;
if ( eStmt==STMT_CREATE_PLUGIN )
bRes = sphPluginCreate ( pStmt->m_sUdfLib.cstr(), eType, pStmt->m_sUdfName.cstr(), SPH_ATTR_NONE, m_sError );
else
bRes = sphPluginDrop ( eType, pStmt->m_sUdfName.cstr(), m_sError );
// report
if ( !bRes )
tOut.Error ( m_sError.cstr() );
else
tOut.Ok();
SphinxqlStateFlush ();
return true;
}
case STMT_RELOAD_PLUGINS:
if ( sphPluginReload ( pStmt->m_sUdfLib.cstr(), m_sError ) )
tOut.Ok();
else
tOut.Error ( m_sError.cstr() );
return true;
case STMT_ATTACH_INDEX:
m_tLastMeta.m_sWarning = "";
HandleMysqlAttach ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_FLUSH_RTINDEX:
HandleMysqlFlushRtindex ( tOut, *pStmt );
return true;
case STMT_FLUSH_RAMCHUNK:
HandleMysqlFlushRamchunk ( tOut, *pStmt );
return true;
case STMT_SHOW_VARIABLES:
HandleMysqlShowVariables ( tOut, *pStmt );
return true;
case STMT_TRUNCATE_RTINDEX:
m_tLastMeta.m_sWarning = "";
HandleMysqlTruncate ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_OPTIMIZE_INDEX:
HandleMysqlOptimize ( tOut, *pStmt );
return true;
case STMT_SELECT_COLUMNS:
HandleMysqlSelectColumns ( tOut, *pStmt, this );
return true;
case STMT_SHOW_COLLATION:
HandleMysqlShowCollations ( tOut );
return true;
case STMT_SHOW_CHARACTER_SET:
HandleMysqlShowCharacterSet ( tOut );
return true;
case STMT_SHOW_INDEX_STATUS:
HandleMysqlShowIndexStatus ( tOut, *pStmt );
return true;
case STMT_SHOW_FEDERATED_INDEX_STATUS:
HandleMysqlShowFederatedIndexStatus ( tOut, *pStmt );
return true;
case STMT_SHOW_INDEX_SETTINGS:
HandleMysqlShowIndexSettings ( tOut, *pStmt );
return true;
case STMT_SHOW_PROFILE:
HandleMysqlShowProfile ( tOut, m_tLastProfile, false );
return false; // do not profile this call, keep last query profile
case STMT_ALTER_ADD:
HandleMysqlAlter ( tOut, *pStmt, Alter_e::AddColumn );
return true;
case STMT_ALTER_MODIFY:
HandleMysqlAlter ( tOut, *pStmt, Alter_e::ModifyColumn );
return true;
case STMT_ALTER_DROP:
HandleMysqlAlter ( tOut, *pStmt, Alter_e::DropColumn );
return true;
case STMT_ALTER_REBUILD_SI:
HandleMysqlAlter ( tOut, *pStmt, Alter_e::RebuildSI );
return true;
case STMT_SHOW_PLAN:
HandleMysqlShowPlan ( tOut, m_tLastProfile, false, ::IsDot ( *pStmt ) );
return false; // do not profile this call, keep last query profile
case STMT_SHOW_DATABASES:
HandleMysqlShowDatabases ( tOut, *pStmt );
return true;
case STMT_SHOW_PLUGINS:
HandleMysqlShowPlugins ( tOut, *pStmt );
return true;
case STMT_SHOW_THREADS:
HandleMysqlShowThreads ( tOut, pStmt );
return true;
case STMT_ALTER_RECONFIGURE: // ALTER RTINDEX/TABLE <idx> RECONFIGURE
FreezeLastMeta();
HandleMysqlReconfigure ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_ALTER_KLIST_TARGET: // ALTER TABLE <idx> KILLLIST_TARGET = 'the string'
FreezeLastMeta();
HandleMysqlAlterKlist ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_ALTER_INDEX_SETTINGS: // ALTER TABLE <idx> create_table_option_list
FreezeLastMeta();
HandleMysqlAlterIndexSettings ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_FLUSH_INDEX:
HandleMysqlFlush ( tOut, *pStmt );
return true;
case STMT_RELOAD_INDEX:
FreezeLastMeta();
HandleMysqlReloadIndex ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_FLUSH_HOSTNAMES:
HandleMysqlFlushHostnames ( tOut );
return true;
case STMT_FLUSH_LOGS:
HandleMysqlFlushLogs ( tOut );
return true;
case STMT_RELOAD_INDEXES:
HandleMysqlReloadIndexes ( tOut );
return true;
case STMT_DEBUG:
HandleMysqlDebug ( tOut, pStmt->m_pDebugCmd.get(), m_tLastProfile );
return false; // do not profile this call, keep last query profile
case STMT_JOIN_CLUSTER:
if ( ClusterJoin ( pStmt->m_sIndex, pStmt->m_dCallOptNames, pStmt->m_dCallOptValues, pStmt->m_bClusterUpdateNodes ) )
tOut.Ok();
else
{
TlsMsg::MoveError ( m_sError );
tOut.Error ( m_sError.cstr() );
}
return true;
case STMT_CLUSTER_CREATE:
if ( ClusterCreate ( pStmt->m_sIndex, pStmt->m_dCallOptNames, pStmt->m_dCallOptValues ) )
tOut.Ok();
else
{
TlsMsg::MoveError ( m_sError );
tOut.Error ( m_sError.cstr() );
}
return true;
case STMT_CLUSTER_DELETE:
m_tLastMeta = CSphQueryResultMeta();
if ( GloballyDeleteCluster ( pStmt->m_sIndex, m_tLastMeta.m_sError ) )
tOut.Ok ( 0, m_tLastMeta.m_sWarning.IsEmpty() ? 0 : 1 );
else
tOut.Error ( m_tLastMeta.m_sError.cstr() );
return true;
case STMT_CLUSTER_ALTER_ADD:
case STMT_CLUSTER_ALTER_DROP:
m_tLastMeta = CSphQueryResultMeta();
if ( ClusterAlter ( pStmt->m_sCluster, pStmt->m_sIndex, ( eStmt==STMT_CLUSTER_ALTER_ADD ), m_tLastMeta.m_sError ) )
tOut.Ok ( 0, m_tLastMeta.m_sWarning.IsEmpty() ? 0 : 1 );
else
tOut.Error ( m_tLastMeta.m_sError.cstr() );
return true;
case STMT_CLUSTER_ALTER_UPDATE:
m_tLastMeta = CSphQueryResultMeta();
if ( ClusterAlterUpdate ( pStmt->m_sCluster, pStmt->m_sSetName, m_tLastMeta.m_sError ) )
tOut.Ok();
else
tOut.Error ( m_tLastMeta.m_sError.cstr() );
return true;
case STMT_EXPLAIN:
HandleMysqlExplain ( tOut, *pStmt, IsDot ( *pStmt ) );
return true;
case STMT_IMPORT_TABLE:
FreezeLastMeta();
HandleMysqlImportTable ( tOut, *pStmt, m_tLastMeta.m_sWarning );
return true;
case STMT_FREEZE:
HandleMysqlFreezeIndexes ( tOut, *pStmt, m_tLastMeta.m_sWarning);
return true;
case STMT_UNFREEZE:
HandleMysqlUnfreezeIndexes ( tOut, pStmt->m_sIndex );
return true;
case STMT_SHOW_SETTINGS:
{
ScRL_t dRotateConfigMutexRlocked { g_tRotateConfigMutex };
HandleMysqlShowSettings ( g_hCfg, tOut );
}
return true;
case STMT_KILL:
HandleMysqlKill ( tOut, pStmt->m_iIntParam );
return true;
case STMT_SHOW_LOCKS:
HandleMysqlShowLocks ( tOut );
return true;
default:
m_sError.SetSprintf ( "internal error: unhandled statement type (value=%d)", eStmt );
tOut.Error ( m_sError.cstr() );
return true;
} // switch
return true; // for cases that break early
}
bool session::IsAutoCommit ( const ClientSession_c* pSession )
{
assert ( pSession );
return pSession->m_bAutoCommit;
}
bool session::IsAutoCommit ()
{
return IsAutoCommit ( GetClientSession() );
}
bool session::IsInTrans ( const ClientSession_c* pSession )
{
assert ( pSession );
return pSession->m_bInTransaction;
}
VecTraits_T<int64_t> session::LastIds ()
{
return GetClientSession()->m_dLastIds;
}
void session::SetOptimizeById ( bool bOptimizeById )
{
GetClientSession()->m_bOptimizeById = bOptimizeById;
}
bool session::GetOptimizeById()
{
return GetClientSession()->m_bOptimizeById;
}
void session::SetDeprecatedEOF ( bool bDeprecatedEOF )
{
GetClientSession()->m_bDeprecatedEOF = bDeprecatedEOF;
}
bool session::GetDeprecatedEOF()
{
return GetClientSession()->m_bDeprecatedEOF;
}
bool session::Execute ( Str_t sQuery, RowBuffer_i& tOut )
{
return GetClientSession()->Execute ( sQuery, tOut );
}
void session::SetFederatedUser ()
{
GetClientSession()->m_bFederatedUser = true;
}
void session::SetUser ( const CSphString & sUser )
{
GetClientSession()->m_sUser = sUser;
}
void session::SetAutoCommit ( bool bAutoCommit )
{
GetClientSession()->m_bAutoCommit = bAutoCommit;
}
void session::SetInTrans ( bool bInTrans )
{
GetClientSession()->m_bInTransaction = bInTrans;
}
bool session::IsInTrans ()
{
return IsInTrans ( GetClientSession() );
}
QueryProfile_c * session::StartProfiling ( ESphQueryState eState )
{
auto pSession = GetClientSession();
QueryProfile_c* pProfile = nullptr;
if ( session::IsProfile() ) // the current statement might change it
{
pProfile = &pSession->m_tProfile;
pProfile->Start ( eState );
}
return pProfile;
}
void session::SaveLastProfile ()
{
auto pSession = GetClientSession();
pSession->m_tLastProfile = pSession->m_tProfile;
}
/// sphinxql command over API
void HandleCommandSphinxql ( GenericOutputBuffer_c & tOut, WORD uVer, InputBuffer_c & tReq ) REQUIRES (HandlerThread)
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_SPHINXQL, tOut ) )
return;
auto tReply = APIAnswer ( tOut, VER_COMMAND_SPHINXQL );
// parse and run request
CSphVector<BYTE> dString;
tReq.GetString ( dString );
RunSingleSphinxqlCommand ( dString, tOut );
}
/// json command over API
void HandleCommandJson ( ISphOutputBuffer & tOut, WORD uVer, InputBuffer_c & tReq )
{
if ( !CheckCommandVersion ( uVer, VER_COMMAND_JSON, tOut ) )
return;
// parse request
CSphString sEndpoint = tReq.GetString ();
CSphString sCommand = tReq.GetString ();
CSphVector<BYTE> dResult;
sphProcessHttpQueryNoResponce ( sEndpoint, sCommand, dResult );
auto tReply = APIAnswer ( tOut, VER_COMMAND_JSON );
tOut.SendString ( sEndpoint.cstr() );
tOut.SendArray ( dResult );
}
void StatCountCommand ( SearchdCommand_e eCmd )
{
if ( eCmd<SEARCHD_COMMAND_TOTAL )
gStats ().m_iCommandCount[eCmd].fetch_add ( 1, std::memory_order_relaxed );
}
// fixme! move federated stuff to another parser and remove all kinds of 'fixups'
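// FEDERATED clients wrap the real query into a string filter, i.e. something like
// SELECT ... FROM tbl WHERE query='<real SELECT statement>'; this extracts the inner
// query, re-parses it and merges the outer select list into it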
bool FixupFederatedQuery ( ESphCollation eCollation, CSphVector<SqlStmt_t> & dStmt, CSphString & sError, CSphString & sFederatedQuery )
{
if ( !dStmt.GetLength() )
return true;
if ( dStmt.GetLength()>1 )
{
sError.SetSprintf ( "multi-query not supported" );
return false;
}
SqlStmt_t & tStmt = dStmt[0];
if ( tStmt.m_eStmt==STMT_SHOW_FEDERATED_INDEX_STATUS )
return true;
else if ( tStmt.m_eStmt == STMT_SET )
return true;
else if ( tStmt.m_eStmt != STMT_SELECT)
{
sError.SetSprintf ( "unhandled statement type (value=%d)", tStmt.m_eStmt );
return false;
}
CSphQuery & tSrcQuery = tStmt.m_tQuery;
// remove the 'query' column, as it was auto-generated
ARRAY_FOREACH ( i, tSrcQuery.m_dItems )
{
if ( tSrcQuery.m_dItems[i].m_sAlias=="query" )
{
tSrcQuery.m_dItems.Remove ( i );
break;
}
}
// move actual query from filter to query itself
if ( tSrcQuery.m_dFilters.GetLength()!=1 ||
tSrcQuery.m_dFilters[0].m_sAttrName!="query" || tSrcQuery.m_dFilters[0].m_eType!=SPH_FILTER_STRING || tSrcQuery.m_dFilters[0].m_dStrings.GetLength()!=1 )
return true;
const CSphString & sRealQuery = tSrcQuery.m_dFilters[0].m_dStrings[0];
// parse real query
CSphVector<SqlStmt_t> dRealStmt;
bool bParsedOK = sphParseSqlQuery ( FromStr ( sRealQuery ), dRealStmt, sError, eCollation );
if ( !bParsedOK )
return false;
if ( dRealStmt.GetLength()!=1 )
{
sError.SetSprintf ( "multi-query not supported, got queries=%d", dRealStmt.GetLength() );
return false;
}
SqlStmt_t & tRealStmt = dRealStmt[0];
if ( tRealStmt.m_eStmt!=STMT_SELECT )
{
sError.SetSprintf ( "unhandled statement type (value=%d)", tRealStmt.m_eStmt );
return false;
}
// keep originals
CSphQuery & tRealQuery = tRealStmt.m_tQuery;
tRealQuery.m_dRefItems = tSrcQuery.m_dItems; //select list items
tRealQuery.m_sIndexes = tSrcQuery.m_sIndexes; // index name
sFederatedQuery = sRealQuery;
// merge select list items
SmallStringHash_T<int> hItems;
ARRAY_FOREACH ( i, tRealQuery.m_dItems )
hItems.Add ( i, tRealQuery.m_dItems[i].m_sAlias );
for ( CSphQueryItem & tItem : tRealQuery.m_dRefItems )
{
int * pRealItem = hItems ( tItem.m_sAlias );
if ( !pRealItem )
{
tRealQuery.m_dItems.Add ( tItem );
} else
{
// make the original item's expression match the real one, to minimize the result set
CSphQueryItem & tRealItem = tRealQuery.m_dItems[*pRealItem];
if ( tItem.m_sExpr!=tRealItem.m_sExpr )
tItem.m_sExpr = tRealItem.m_sExpr;
}
}
// query setup
tSrcQuery = tRealQuery;
return true;
}
/////////////////////////////////////////////////////////////////////////////
// INDEX ROTATION
/////////////////////////////////////////////////////////////////////////////
static bool ApplyIndexKillList ( const CSphIndex * pIndex, CSphString & sWarning, CSphString & sError, bool bShowMessage )
{
CSphFixedVector<DocID_t> dKillList(0);
KillListTargets_c tTargets;
if ( !pIndex->LoadKillList ( &dKillList, tTargets, sError ) )
return false;
if ( !tTargets.m_dTargets.GetLength() )
return true;
if ( bShowMessage )
sphInfo ( "applying killlist of table '%s'", pIndex->GetName() );
for ( const auto & tIndex : tTargets.m_dTargets )
{
// just in case; otherwise we'll be rlocking an already rlocked index
if ( tIndex.m_sIndex==pIndex->GetName() )
{
sWarning.SetSprintf ( "table '%s': applying killlist to itself", tIndex.m_sIndex.cstr() );
continue;
}
auto pServed = GetServed ( tIndex.m_sIndex );
if ( pServed )
{
RWIdx_c pTarget { pServed };
// kill the docids provided by sql_query_killlist and similar
if ( tIndex.m_uFlags & KillListTarget_t::USE_KLIST )
pTarget->KillMulti ( dKillList );
// kill all the docids present in this index
if ( tIndex.m_uFlags & KillListTarget_t::USE_DOCIDS )
pIndex->KillExistingDocids ( pTarget );
}
else
sWarning.SetSprintf ( "table '%s' from killlist_target not found", tIndex.m_sIndex.cstr() );
}
return true;
}
// we don't rlock/wlock the index because we assume that we are being called from a place that already did that for us
bool ApplyKillListsTo ( CSphIndex* pKillListTarget, CSphString & sError )
{
KillListTargets_c tTargets;
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto& tIt : *hLocal )
{
if ( tIt.first==pKillListTarget->GetName () || !tIt.second )
continue;
RWIdx_c pIndexWithKillList { tIt.second };
CSphFixedVector<DocID_t> dKillList(0);
tTargets.m_dTargets.Resize(0);
if ( !pIndexWithKillList->LoadKillList ( &dKillList, tTargets, sError ) )
return false;
if ( !dKillList.GetLength() )
continue;
// if this index has 'our' index as its killlist_target, apply the killlist
for ( const auto & tIndex : tTargets.m_dTargets )
if ( tIndex.m_sIndex== pKillListTarget->GetName() )
{
if ( tIndex.m_uFlags & KillListTarget_t::USE_KLIST )
pKillListTarget->KillMulti ( dKillList );
// kill all the docids present in this index
if ( tIndex.m_uFlags & KillListTarget_t::USE_DOCIDS )
pIndexWithKillList->KillExistingDocids ( pKillListTarget );
}
}
return true;
}
bool PreloadKlistTarget ( const CSphString& sBase, RotateFrom_e eFrom, StrVec_t & dKlistTarget )
{
switch ( eFrom )
{
case RotateFrom_e::NEW:
case RotateFrom_e::NEW_AND_OLD:
return IndexFiles_c ( sBase ).ReadKlistTargets ( dKlistTarget, ".new" );
case RotateFrom_e::REENABLE:
return IndexFiles_c ( sBase ).ReadKlistTargets ( dKlistTarget );
default:
return false;
}
}
static bool ApplyOthersKillListsToMe ( CSphIndex* pIndex, const char* szIndex, CSphString& sError )
{
sphLogDebug ( "rotating table '%s': applying other tables killlists", szIndex );
// apply other indexes' killlists to THIS index
if ( !ApplyKillListsTo ( pIndex, sError ) )
{
sphWarning ( "rotating table '%s': %s", szIndex, sError.cstr() );
return false;
}
sphLogDebug ( "rotating table '%s': applying other tables killlists... DONE", szIndex );
return true;
}
static bool ApplyMyKillListsToOthers ( const CSphIndex* pIndex, const char* szIndex, CSphString& sError )
{
sphLogDebug ( "rotating table '%s': apply killlist from this table to other tables (killlist_target)", szIndex );
// apply killlist from this index to other indexes (killlist_target)
// if this fails, log a warning and bail out
CSphString sWarning;
if ( !ApplyIndexKillList ( pIndex, sWarning, sError ) )
{
sphWarning ( "rotating table '%s': %s", szIndex, sError.cstr() );
return false;
}
if ( sWarning.Length() )
sphWarning ( "rotating table '%s': %s", szIndex, sWarning.cstr() );
sphLogDebug ( "rotating table '%s': apply killlist from this table to other tables (killlist_target)... DONE", szIndex );
return true;
}
bool ApplyKilllistsMyAndToMe ( CSphIndex* pIdx, const char* szIndex, CSphString& sError )
{
return ApplyOthersKillListsToMe ( pIdx, szIndex, sError ) && ApplyMyKillListsToOthers ( pIdx, szIndex, sError );
}
// tServed here might be one of:
// 1. not yet served, with .new files present: needs to be rotated, then loaded from scratch
// 2. not yet served: needs to be loaded from scratch
// 3. already served, but with fresh .new files present: needs to be rotated, then reloaded
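// note: greedy rotation renames files and preallocs in place, blocking queries to the table
// meanwhile; the seamless path (RotateIndexMT below) preloads a clone first and only then
// swaps it in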
bool RotateIndexGreedy ( const ServedIndex_c& tServed, const char* szIndex, CSphString& sError )
{
assert ( tServed.m_eType == IndexType_e::PLAIN );
sphLogDebug ( "RotateIndexGreedy for '%s' invoked", szIndex );
//////////////////
/// bool RotateIndexFilesGreedy ( const ServedDesc_t& tServed, const char* szIndex, CSphString& sError )
//////////////////
CSphIndex* pIdx = UnlockedHazardIdxFromServed ( tServed ); // it should be locked, if necessary, before
auto sIndexPath = tServed.m_sIndexPath;
if ( pIdx )
sIndexPath = pIdx->GetFilebase();
CheckIndexRotate_c tCheck ( sIndexPath );
if ( tCheck.NothingToRotate() )
return false;
IndexFiles_c dServedFiles ( sIndexPath, szIndex );
IndexFiles_c dFreshFiles ( dServedFiles.MakePath ( tCheck.RotateFromNew() ? ".new" : "" ), szIndex );
// if ( !dFreshFiles.CheckHeader() )... // no need to check, since CheckIndexRotate_c already did it.
if ( !dFreshFiles.HasAllFiles() )
{
sphWarning ( "rotating table '%s': unreadable: %s; abort rotation", szIndex, strerrorm ( errno ) );
return false;
}
bool bHasOldServedFiles = dServedFiles.HasAllFiles();
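// ActionSequence_c records the renames as deferred actions: RunDefers() executes them in
// order, and UnRunDefers() undoes them if the freshly rotated files turn out to be unusable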
std::optional<ActionSequence_c> tActions;
if ( tCheck.RotateFromNew() )
{
tActions.emplace();
if ( bHasOldServedFiles )
tActions->Defer ( RenameFiles ( dServedFiles, "", ".old") );
tActions->Defer ( RenameFiles ( dServedFiles, ".new", "" ) );
// do files rotation
if ( !tActions->RunDefers() )
{
bool bFatal;
std::tie ( sError, bFatal ) = tActions->GetError();
sphWarning ( "RotateIndexGreedy error: %s", sError.cstr() );
if ( bFatal )
sphFatal ( "RotateIndexGreedy error: %s", sError.cstr() ); // fixme! Do we really need to fatal? (adopted from prev version)
return false;
}
}
// try to use new index
StrVec_t dWarnings;
if ( !pIdx->Prealloc ( g_bStripPath, nullptr, dWarnings ) )
{
sphWarning ( "rotating table '%s': .new preload failed: %s", szIndex, pIdx->GetLastError().cstr() );
if ( tActions )
{
if ( !tActions->UnRunDefers() )
{
bool bFatal;
std::tie ( sError, bFatal ) = tActions->GetError();
sphWarning ( "RotateIndexGreedy error: %s, NOT SERVING", sError.cstr() );
if ( bFatal )
sphFatal ( "RotateIndexGreedy error: %s", sError.cstr() ); // fixme! Do we really need to fatal? (adopted from prev version)
return false;
}
sphLogDebug ( "PreallocIndexGreedy: has recovered. Prealloc it." );
if ( !pIdx->Prealloc ( g_bStripPath, nullptr, dWarnings ) )
{
sError.SetSprintf ( "rotating table '%s': .new preload failed; ROLLBACK FAILED; TABLE UNUSABLE", szIndex );
return false;
}
}
}
assert ( pIdx->GetTokenizer() && pIdx->GetDictionary() );
for ( const auto& i : dWarnings )
sphWarning ( "rotating table '%s': %s", szIndex, i.cstr() );
if ( !pIdx->GetLastWarning().IsEmpty() )
sphWarning ( "rotating table '%s': %s", szIndex, pIdx->GetLastWarning().cstr() );
// unlink .old
if ( bHasOldServedFiles )
dServedFiles.Unlink (".old");
// finalize
if ( !ApplyKilllistsMyAndToMe ( pIdx, szIndex, sError ) )
return false;
// uff. all done
sphInfo ( "rotating table '%s': success", szIndex );
return true;
}
void DumpMemStat ()
{
#if SPH_ALLOCS_PROFILER
sphMemStatDump ( g_iLogFile );
#endif
}
/// check and report if there were any leaks since last call
void CheckLeaks () REQUIRES ( MainThread )
{
#if SPH_DEBUG_LEAKS
static int iHeadAllocs = sphAllocsCount ();
static int iHeadCheckpoint = sphAllocsLastID ();
if ( g_dThd.GetLength()==0 && !g_bInRotate && iHeadAllocs!=sphAllocsCount() )
{
sphSeek ( g_iLogFile, 0, SEEK_END );
sphAllocsDump ( g_iLogFile, iHeadCheckpoint );
iHeadAllocs = sphAllocsCount ();
iHeadCheckpoint = sphAllocsLastID ();
}
#endif
#if SPH_ALLOCS_PROFILER
int iAllocLogPeriod = 60 * 1000000;
static int64_t tmLastLog = -iAllocLogPeriod*10;
const int iAllocCount = sphAllocsCount();
const float fMemTotal = (float)sphAllocBytes();
if ( iAllocLogPeriod>0 && tmLastLog+iAllocLogPeriod<sphMicroTimer() )
{
tmLastLog = sphMicroTimer ();
const int iThdsCount = g_dThd.GetLength ();
const float fMB = 1024.0f*1024.0f;
sphInfo ( "--- allocs-count=%d, mem-total=%.4f Mb, active-threads=%d", iAllocCount, fMemTotal/fMB, iThdsCount );
DumpMemStat ();
}
#endif
}
// tricky bit
// fixup was initially intended for (very old) index formats that did not store dict/tokenizer settings
// however currently it also ends up configuring dict/tokenizer for fresh RT indexes!
// (and for existing RT indexes, settings get loaded during the Prealloc() call)
bool FixupAndLockIndex ( ServedIndex_c& tIdx, CSphIndex* pIdx, const CSphConfigSection* pConfig, const char* szIndexName, StrVec_t& dWarnings, CSphString& sError )
{
if ( pConfig )
{
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = CreateFilenameBuilder ( szIndexName );
if ( !sphFixupIndexSettings ( pIdx, *pConfig, g_bStripPath, pFilenameBuilder.get(), dWarnings, sError ) )
return false;
}
// try to lock it
return LockIndex ( tIdx, pIdx, sError );
}
/// this gets called for every new physical index
/// that is, local and RT indexes, but not distributed one
bool PreallocNewIndex ( ServedIndex_c & tIdx, const CSphConfigSection * pConfig, const char * szIndexName, StrVec_t & dWarnings, CSphString & sError )
{
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = CreateFilenameBuilder ( szIndexName );
CSphIndex* pIdx = UnlockedHazardIdxFromServed ( tIdx );
assert (pIdx);
if ( !pIdx->Prealloc ( g_bStripPath, pFilenameBuilder.get(), dWarnings ) )
{
sError.SetSprintf ( "prealloc: %s", pIdx->GetLastError().cstr() );
return false;
}
return FixupAndLockIndex ( tIdx, pIdx, pConfig, szIndexName, dWarnings, sError );
}
// same as above, but loads the config section for the given index by itself
static bool PreallocNewIndex ( ServedIndex_c & tIdx, const char * szIndexName, StrVec_t & dWarnings, CSphString & sError )
{
const CSphConfigSection * pIndexConfig = nullptr;
CSphConfigSection tIndexConfig;
{
ScRL_t dRLockConfig { g_tRotateConfigMutex };
if ( g_hCfg ( "index" ) )
pIndexConfig = g_hCfg["index"] ( szIndexName );
if ( pIndexConfig )
{
tIndexConfig = *pIndexConfig;
pIndexConfig = &tIndexConfig;
}
}
return PreallocNewIndex ( tIdx, pIndexConfig, szIndexName, dWarnings, sError );
}
// helper to switch an index over to another path
// (used by both greedy and seamless switchover)
class SwitchOver_c : public ISphNoncopyable
{
const char * m_szIndex;
const CSphString & m_sBase;
const CSphString & m_sNewPath;
StrVec_t & m_dWarnings;
CSphString & m_sError;
bool m_bHaveOldFiles = false;
CSphIndex* m_pIdx = nullptr;
CSphString m_sOldPath;
std::optional<ActionSequence_c> m_tActions;
std::optional<IndexFiles_c> m_tFreshCurFiles;
std::optional<IndexFiles_c> m_tFreshNewFiles;
public:
SwitchOver_c ( const char* szIndex, const CSphString& sBase, const CSphString& sNewPath, StrVec_t& dWarnings, CSphString& sError )
: m_szIndex { szIndex }
, m_sBase { sBase }
, m_sNewPath { sNewPath }
, m_dWarnings { dWarnings }
, m_sError { sError }
{
m_sError = "";
}
~SwitchOver_c()
{
for ( const auto& i : m_dWarnings )
sphWarning ( "switchover table '%s': %s", m_szIndex, i.cstr() );
if ( m_pIdx && !m_pIdx->GetLastWarning().IsEmpty() )
sphWarning ( "switchover table '%s': %s", m_szIndex, m_pIdx->GetLastWarning().cstr() );
}
void SetIdx( CSphIndex* pIdx ) noexcept
{
assert ( pIdx );
m_pIdx = pIdx;
m_sOldPath = pIdx->GetFilebase();
}
bool CheckSameBase () const noexcept
{
assert ( m_pIdx );
if ( m_sNewPath != m_sOldPath )
return true;
m_sError.SetSprintf ( "reload path should be different from current path" );
return false;
}
bool RotateFiles()
{
CheckIndexRotate_c tCheckNew ( m_sNewPath );
if ( tCheckNew.NothingToRotate() )
{
m_sError.SetSprintf ( "No index found by given %s path, do nothing.", m_sNewPath.cstr() );
return false;
}
bool bHaveAllFreshNewFiles = tCheckNew.RotateFromNew() && IndexFiles_c { IndexFiles_c::MakePath ( ".new", m_sNewPath ), m_szIndex }.HasAllFiles();
if ( tCheckNew.RotateReenable() )
{
IndexFiles_c tCur { IndexFiles_c::MakePath ( "", m_sNewPath ), m_szIndex };
if ( tCur.HasAllFiles() )
m_tFreshCurFiles.emplace ( std::move ( tCur ) );
}
auto bHaveAllFreshCurFiles = (bool)m_tFreshCurFiles;
if ( !bHaveAllFreshNewFiles && !bHaveAllFreshCurFiles )
{
m_sError.SetSprintf ( "rotating table '%s': unreadable: %s; abort rotation", m_szIndex, strerrorm ( errno ) );
return false;
}
if ( bHaveAllFreshNewFiles )
{
m_tActions.emplace();
if ( bHaveAllFreshCurFiles )
{
m_tActions->Defer ( RenameFiles ( *m_tFreshCurFiles, "", ".old" ) );
m_bHaveOldFiles = true;
}
// note: we can't rename via m_tFreshCurFiles here - it is only set in the reenable case
// and may well be empty at this point; the fresh .new files live at m_sNewPath
m_tFreshNewFiles.emplace ( m_sNewPath, m_szIndex );
m_tActions->Defer ( RenameFiles ( *m_tFreshNewFiles, ".new", "" ) );
// do files rotation
if ( !m_tActions->RunDefers() )
{
bool bFatal;
std::tie ( m_sError, bFatal ) = m_tActions->GetError();
m_sError.SetSprintf ( "SwitchoverIndexGreedy error: %s", m_sError.cstr() );
return false;
}
}
return true;
}
bool UnRotateFiles()
{
if ( m_tActions && !m_tActions->UnRunDefers() )
{
auto [sError, bFatal] = m_tActions->GetError();
m_sError.SetSprintf ( "UnRotateFiles error: %s", sError.cstr() );
return false;
}
m_bHaveOldFiles = false;
return true;
}
bool Finalize()
{
if ( !WriteLinkFile ( m_sBase, m_sNewPath, m_sError ) )
m_sError.SetSprintf ( "switchover wasn't able to populate %s.link; new persistent path to the index is NOT saved: %s", m_sBase.cstr(), m_sError.cstr() );
// finalize
assert ( m_pIdx->GetTokenizer() && m_pIdx->GetDictionary() );
if ( !ApplyKilllistsMyAndToMe ( m_pIdx, m_szIndex, m_sError ) )
sphWarning ( "switchover error when applying kill-lists: %s", m_sError.cstr() );
// unlink .old from new location (it is temporary anyway!)
if ( m_bHaveOldFiles && m_tFreshCurFiles )
m_tFreshCurFiles->Unlink ( ".old" );
// uff. all done
sphInfo ( "switchover table '%s': success", m_szIndex );
return true;
}
bool SwitchGreedy ( CSphIndex* pIdx )
{
SetIdx ( pIdx );
sphLogDebug ( "SwitchGreedy for '%s' invoked. Base %s, path %s", m_szIndex, m_sBase.cstr(), m_sNewPath.cstr() );
if ( !CheckSameBase() )
return true;
if ( !RotateFiles() )
return true;
// try to use new index
pIdx->Unlock();
pIdx->SetFilebase ( m_sNewPath );
if ( pIdx->Prealloc ( g_bStripPath, nullptr, m_dWarnings ) )
return Finalize();
// load previous version of index
pIdx->SetFilebase ( m_sOldPath );
bool bPreallocOld = pIdx->Prealloc ( g_bStripPath, nullptr, m_dWarnings );
// roll-back rotated files
UnRotateFiles();
// collect all errors
StringBuilder_c sError { "; " };
if ( !m_sError.IsEmpty() )
sError << m_sError;
if ( !bPreallocOld )
sError.Sprintf ( "SwitchGreedy table '%s': preload of new index failed, rollback also failed; TABLE UNUSABLE", m_szIndex );
sError.MoveTo ( m_sError );
return bPreallocOld;
}
// this function always returns true, which means the existing index can't be damaged by this call.
bool SwitchSeamless ( const cServedIndexRefPtr_c& pServed )
{
assert ( pServed && pServed->m_eType == IndexType_e::PLAIN );
CSphIndex* pIdx = UnlockedHazardIdxFromServed ( *pServed );
SetIdx ( pIdx );
sphLogDebug ( "SwitchSeamless for '%s' invoked. Base %s, path %s", m_szIndex, m_sBase.cstr(), m_sNewPath.cstr() );
if ( !CheckSameBase() )
return true;
if ( !RotateFiles() )
{
UnRotateFiles();
return true;
}
//////////////////
/// load new index
//////////////////
ServedIndexRefPtr_c pNewServed = MakeCloneForRotation ( pServed, m_szIndex );
pIdx = UnlockedHazardIdxFromServed ( *pNewServed );
pIdx->SetFilebase ( m_sNewPath );
// prealloc enough RAM and lock new index
sphLogDebug ( "prealloc enough RAM and lock new table" );
if ( !PreallocNewIndex ( *pNewServed, m_szIndex, m_dWarnings, m_sError ) )
return true;
pIdx->Preread();
pNewServed->UpdateMass(); // second update: the first was at the end of Prealloc; this one corrects the mass after preread
RIdx_c pOldIdx { pServed };
pIdx->m_iTID = pOldIdx->m_iTID;
// pServed->SetUnlink ( pOldIdx->GetFilebase() );
SetIdx ( pIdx );
Finalize();
// all went fine; swap them
sphLogDebug ( "all went fine; swap them" );
Binlog::NotifyIndexFlush ( pIdx->m_iTID, m_szIndex, Binlog::NoShutdown, Binlog::NoSave );
g_pLocalIndexes->AddOrReplace ( pNewServed, m_szIndex );
sphInfo ( "rotating table '%s': success", m_szIndex );
// actually we always return true, because rotating from new place is always safe.
return true;
}
};
// returns false if the index should not be served;
// the reason is reported via sError
bool SwitchoverIndexGreedy ( CSphIndex* pIdx, const char* szIndex, const CSphString& sBase, const CSphString& sNewPath, StrVec_t& dWarnings, CSphString& sError )
{
SwitchOver_c tSwitcher ( szIndex, sBase, sNewPath, dWarnings, sError );
return tSwitcher.SwitchGreedy ( pIdx );
}
bool DoSwitchoverIndexSeamless ( const cServedIndexRefPtr_c& pServed, const char* szIndex, const CSphString& sBase, const CSphString& sNewPath, StrVec_t& dWarnings, CSphString& sError ) EXCLUDES ( MainThread )
{
SwitchOver_c tSwitcher ( szIndex, sBase, sNewPath, dWarnings, sError );
return tSwitcher.SwitchSeamless ( pServed );
}
// called either from MysqlReloadIndex or from the rotation task (never from the main thread).
bool RotateIndexMT ( ServedIndexRefPtr_c& pNewServed, const CSphString & sIndex, StrVec_t & dWarnings, CSphString & sError ) EXCLUDES ( MainThread )
{
assert ( pNewServed && pNewServed->m_eType == IndexType_e::PLAIN );
sphInfo ( "rotating table '%s': started", sIndex.cstr() );
auto sRealPath = RedirectToRealPath ( pNewServed->m_sIndexPath );
CheckIndexRotate_c tCheck ( sRealPath );
if ( tCheck.NothingToRotate() )
{
sError.SetSprintf ( "nothing to rotate for table '%s'", sIndex.cstr() );
return false;
}
//////////////////
/// load new index
//////////////////
CSphIndex* pNewIndex = UnlockedHazardIdxFromServed ( *pNewServed );
if ( tCheck.RotateFromNew() )
pNewIndex->SetFilebase ( IndexFiles_c::MakePath ( ".new", sRealPath ) );
// prealloc enough RAM and lock new index
sphLogDebug ( "prealloc enough RAM and lock new table" );
if ( !PreallocNewIndex ( *pNewServed, sIndex.cstr(), dWarnings, sError ) )
return false;
pNewIndex->Preread();
pNewServed->UpdateMass(); // second update: the first was at the end of Prealloc; this one corrects the mass after preread
//////////////////////
/// activate new index
//////////////////////
sphLogDebug ( "activate new table" );
if ( tCheck.RotateFromNew() )
{
ActionSequence_c tActions;
auto pServed = GetServed ( sIndex );
if ( pServed && RedirectToRealPath ( pServed->m_sIndexPath ) == sRealPath )
tActions.Defer ( RenameIdxSuffix ( pServed, ".old" ) );
tActions.Defer ( RenameIdx ( pNewIndex, sRealPath ) ); // rename 'new' to 'current'
if ( !tActions.RunDefers() )
{
bool bFatal;
std::tie ( sError, bFatal ) = tActions.GetError();
sphWarning ( "RotateIndexMT error: table %s, error %s", sIndex.cstr(), sError.cstr() );
if ( bFatal )
g_pLocalIndexes->Delete ( sIndex );
return false;
}
if ( pServed )
{
RIdx_c pOldIdx { pServed };
pNewIndex->m_iTID = pOldIdx->m_iTID;
pServed->SetUnlink ( pOldIdx->GetFilebase() );
}
}
if ( !ApplyKilllistsMyAndToMe ( pNewIndex, sIndex.cstr(), sError ) )
return false;
// all went fine; swap them
sphLogDebug ( "all went fine; swap them" );
Binlog::NotifyIndexFlush ( pNewIndex->m_iTID, sIndex.cstr(), Binlog::NoShutdown, Binlog::NoSave );
g_pLocalIndexes->AddOrReplace ( pNewServed, sIndex );
sphInfo ( "rotating table '%s': success", sIndex.cstr() );
return true;
}
static void InvokeRotation ( VecOfServed_c&& dDeferredIndexes ) REQUIRES ( MainThread )
{
assert ( !dDeferredIndexes.IsEmpty () && "Rotation queue must be checked before invoking rotation!");
Threads::StartJob ( [dIndexes = std::move ( dDeferredIndexes )] () mutable
{
// want to track rotation thread only at work
auto pDesc = PublishSystemInfo ( "ROTATION" );
sphLogDebug ( "TaskRotation starts with %d deferred tables", dIndexes.GetLength() );
for ( auto& tIndex : dIndexes )
{
ServedIndexRefPtr_c& pReplacementServed = tIndex.second;
const CSphString& sIndex = tIndex.first;
// cluster indexes are managed by a different code path
assert ( !ServedDesc_t::IsCluster ( pReplacementServed ) && "Rotation of clusters MUST never happen!" );
// prealloc RT and percolate here
StrVec_t dWarnings;
CSphString sError;
if ( ServedDesc_t::IsMutable ( pReplacementServed ) )
{
sphLogDebug ( "seamless rotate (prealloc) mutable table %s", sIndex.cstr() );
if ( PreallocNewIndex ( *pReplacementServed, sIndex.cstr(), dWarnings, sError ) )
g_pLocalIndexes->AddOrReplace ( pReplacementServed, sIndex );
else
sphWarning ( "table '%s': %s", sIndex.cstr(), sError.cstr() );
} else
{
sphLogDebug ( "seamless rotate local table %s", sIndex.cstr() );
if ( !RotateIndexMT ( pReplacementServed, sIndex, dWarnings, sError ) )
sphWarning ( "table '%s': %s", sIndex.cstr(), sError.cstr() );
}
for ( const auto& i : dWarnings )
sphWarning ( "table '%s': %s", sIndex.cstr(), i.cstr() );
g_pDistIndexes->Delete ( sIndex ); // postponed delete of same-named distributed (if any)
}
g_bInRotate = false;
RotateGlobalIdf();
sphInfo ( "rotating table: all tables done" );
});
}
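// a simple counting semaphore built on Coro::Waitable_T: wait until the number of running
// rotations drops below the cap, bump the counter, and decrement-and-notify on scope exit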
template<typename FN_ACTION>
bool LimitedParallelRotationMT ( FN_ACTION&& fnAction ) EXCLUDES ( MainThread )
{
assert ( Threads::IsInsideCoroutine() );
// allow several rotations to run at a time (in parallel)
// VIP connections have no limit
if ( session::GetVip() )
return fnAction();
// the limit is arbitrarily set to half of the threadpool size
static Coro::Waitable_T<int> iParallelRotations { 0 };
iParallelRotations.Wait ( [] ( int i ) { return i < Max ( 1, NThreads() / 2 ); } );
iParallelRotations.ModifyValue ( [] ( int& i ) { ++i; } );
AT_SCOPE_EXIT ( [] { iParallelRotations.ModifyValueAndNotifyOne ( [] ( int& i ) { --i; } ); } );
return fnAction();
}
bool LimitedRotateIndexMT ( ServedIndexRefPtr_c& pNewServed, const CSphString& sIndex, StrVec_t& dWarnings, CSphString& sError ) EXCLUDES ( MainThread )
{
return LimitedParallelRotationMT ( [&]() { return RotateIndexMT ( pNewServed, sIndex, dWarnings, sError ); } );
}
bool SwitchoverIndexSeamless ( const cServedIndexRefPtr_c& pServed, const char* szIndex, const CSphString& sBase, const CSphString& sNewPath, StrVec_t& dWarnings, CSphString& sError ) EXCLUDES ( MainThread )
{
return LimitedParallelRotationMT ( [&]() { return DoSwitchoverIndexSeamless ( pServed, szIndex, sBase, sNewPath, dWarnings, sError ); } );
}
void ConfigureLocalIndex ( ServedDesc_t * pIdx, const CSphConfigSection & hIndex, bool bMutableOpt, StrVec_t * pWarnings )
{
pIdx->m_tSettings.Load ( hIndex, bMutableOpt, pWarnings );
pIdx->m_sGlobalIDFPath = pIdx->m_tSettings.m_sGlobalIDFPath;
}
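// schematically, a config section handled here looks like this (syntax simplified; mirrors
// within one agent line are separated by '|'):
//
// index dist1 {
// type = distributed
// local = idx1
// agent = host1:9312:idx2|host2:9312:idx2
// ha_strategy = roundrobin
// }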
bool ConfigureDistributedIndex ( std::function<bool(const CSphString&)>&& fnCheck, DistributedIndex_t & tIdx, const char * szIndexName, const CSphConfigSection & hIndex, CSphString & sError, StrVec_t * pWarnings )
{
assert ( hIndex("type") && hIndex["type"]=="distributed" );
bool bSetHA = false;
// configure ha_strategy
if ( hIndex("ha_strategy") )
{
bSetHA = ParseStrategyHA ( hIndex["ha_strategy"].cstr(), tIdx.m_eHaStrategy );
if ( !bSetHA )
sphWarning ( "table '%s': ha_strategy (%s) is unknown for me, will use random", szIndexName, hIndex["ha_strategy"].cstr() );
}
bool bEnablePersistentConns = ( g_iPersistentPoolSize>0 );
if ( hIndex ( "agent_persistent" ) && !bEnablePersistentConns )
{
sphWarning ( "table '%s': agent_persistent used, but no persistent_connections_limit defined. Fall back to non-persistent agent", szIndexName );
bEnablePersistentConns = false;
}
// add local agents
StrVec_t dLocs;
for ( CSphVariant * pLocal = hIndex("local"); pLocal; pLocal = pLocal->m_pNext )
{
dLocs.Resize(0);
sphSplit ( dLocs, pLocal->cstr(), " \t," );
for ( const auto & sLocal: dLocs )
{
if ( !fnCheck ( sLocal ) )
{
sphWarning ( "table '%s': no such local table '%s', SKIPPED", szIndexName, sLocal.cstr() );
sError.SetSprintf ( "no such local table '%s'", sLocal.cstr() );
return false;
}
tIdx.m_dLocal.Add ( sLocal );
}
}
// index-level agent_retry_count
if ( hIndex ( "agent_retry_count" ) )
{
if ( hIndex["agent_retry_count"].intval ()<=0 )
sphWarning ( "table '%s': agent_retry_count must be positive, ignored", szIndexName );
else
tIdx.m_iAgentRetryCount = hIndex["agent_retry_count"].intval ();
}
if ( hIndex ( "mirror_retry_count" ) )
{
if ( hIndex["mirror_retry_count"].intval ()<=0 )
sphWarning ( "table '%s': mirror_retry_count must be positive, ignored", szIndexName );
else
{
if ( tIdx.m_iAgentRetryCount>0 )
sphWarning ("table '%s': `agent_retry_count` and `mirror_retry_count` both specified (they are aliases)."
"Value of `mirror_retry_count` will be used", szIndexName );
tIdx.m_iAgentRetryCount = hIndex["mirror_retry_count"].intval ();
}
}
if ( !tIdx.m_iAgentRetryCount )
tIdx.m_iAgentRetryCount = g_iAgentRetryCount;
// add remote agents
struct { const char* sSect; bool bBlh; bool bPrs; } dAgentVariants[] =
{
{ "agent", false, false},
{ "agent_persistent", false, bEnablePersistentConns },
{ "agent_blackhole", true, false }
};
for ( auto & tAg : dAgentVariants )
{
for ( CSphVariant * pAgentCnf = hIndex ( tAg.sSect ); pAgentCnf; pAgentCnf = pAgentCnf->m_pNext )
{
AgentOptions_t tAgentOptions { tAg.bBlh, tAg.bPrs, tIdx.m_eHaStrategy, tIdx.m_iAgentRetryCount, 0 };
auto pAgent = ConfigureMultiAgent ( pAgentCnf->cstr(), szIndexName, tAgentOptions, sError, pWarnings );
if ( !pAgent )
return false;
tIdx.m_dAgents.Add ( pAgent );
}
}
// configure options
if ( hIndex("agent_connect_timeout") )
{
if ( hIndex["agent_connect_timeout"].intval()<=0 )
sphWarning ( "table '%s': agent_connect_timeout must be positive, ignored", szIndexName );
else
tIdx.SetAgentConnectTimeoutMs ( hIndex.GetMsTimeMs ( "agent_connect_timeout" ) );
}
tIdx.m_bDivideRemoteRanges = hIndex.GetInt ( "divide_remote_ranges", 0 )!=0;
if ( hIndex("agent_query_timeout") )
{
if ( hIndex["agent_query_timeout"].intval()<=0 )
sphWarning ( "table '%s': agent_query_timeout must be positive, ignored", szIndexName );
else
tIdx.SetAgentQueryTimeoutMs ( hIndex.GetMsTimeMs ( "agent_query_timeout") );
}
bool bHaveHA = tIdx.m_dAgents.any_of ( [] ( const auto& ag ) { return ag->IsHA (); } );
// configure ha_strategy
if ( bSetHA && !bHaveHA && !IsConfigless() )
sphWarning ( "table '%s': ha_strategy defined, but no ha agents in the table", szIndexName );
return true;
}
//////////////////////////////////////////////////
/// configure distributed index and add it to hash
//////////////////////////////////////////////////
// AddIndex -> AddDistributedIndex
static ResultAndIndex_t AddDistributedIndex ( const char * szIndexName, const CSphConfigSection & hIndex, CSphString & sError, StrVec_t * pWarnings=nullptr )
{
DistributedIndexRefPtr_t pIdx ( new DistributedIndex_t );
bool bOk = ConfigureDistributedIndex ( [] ( const auto& sIdx ) { return g_pLocalIndexes->Contains ( sIdx ); }, *pIdx, szIndexName, hIndex, sError, pWarnings );
if ( !bOk || pIdx->IsEmpty () )
{
if ( !bOk )
sError.SetSprintf ( "table '%s': %s", szIndexName, sError.cstr() );
else
sError.SetSprintf ( "table '%s': no valid local/remote tables in distributed table", szIndexName );
return { ADD_ERROR, nullptr };
}
// finally, check and add distributed index to global table
if ( !g_pDistIndexes->Add ( pIdx, szIndexName ) )
{
sError.SetSprintf ( "table '%s': unable to add name (duplicate?)", szIndexName );
return { ADD_ERROR, nullptr };
}
return ResultAndIndex_t { ADD_DISTR, nullptr };
}
// common preconfiguration of mutable indexes
static bool ConfigureRTPercolate ( CSphSchema & tSchema, CSphIndexSettings & tSettings, const char * szIndexName, const CSphConfigSection & hIndex, bool bWordDict, bool bPercolate, StrVec_t * pWarnings, CSphString & sError )
{
// pick config settings
// they should be overridden later by Preload() if needed
{
CSphString sWarning;
if ( !tSettings.Setup ( hIndex, szIndexName, sWarning, sError ) )
{
sphWarning ( "table '%s': %s - NOT SERVING", szIndexName, sError.cstr() );
return false;
}
if ( !sWarning.IsEmpty() )
sphWarning ( "table '%s': %s", szIndexName, sWarning.cstr() );
}
if ( !sphRTSchemaConfigure ( hIndex, tSchema, tSettings, pWarnings, sError, bPercolate, bPercolate ) )
{
sphWarning ( "table '%s': %s - NOT SERVING", szIndexName, sError.cstr () );
return false;
}
if ( bPercolate )
FixPercolateSchema ( tSchema );
if ( !sError.IsEmpty() )
{
if ( pWarnings )
pWarnings->Add(sError);
else
sphWarning ( "table '%s': %s", szIndexName, sError.cstr () );
}
// path
if ( !hIndex ( "path" ) )
{
sphWarning ( "table '%s': path must be specified - NOT SERVING", szIndexName );
return false;
}
if ( !CheckStoredFields ( tSchema, tSettings, sError ) )
{
sphWarning ( "table '%s': %s - NOT SERVING", szIndexName, sError.cstr() );
return false;
}
int iIndexSP = hIndex.GetInt ( "index_sp" );
auto sIndexZones = hIndex.GetStr ( "index_zones" );
bool bHasStripEnabled ( hIndex.GetInt ( "html_strip" )!=0 );
if ( ( iIndexSP!=0 || !sIndexZones.IsEmpty() ) && !bHasStripEnabled )
{
// SENTENCE indexing w/o stripper is a valid combination (only zones require the stripper)
if ( !sIndexZones.IsEmpty() )
{
sphWarning ( "table '%s': has index_sp=%d, index_zones='%s' but disabled html_strip - NOT SERVING", szIndexName, iIndexSP, sIndexZones.cstr() );
return false;
}
CSphString sWarning;
sWarning.SetSprintf ( "has index_sp=%d but disabled html_strip - PARAGRAPH unavailable", iIndexSP );
if ( pWarnings )
pWarnings->Add(sWarning);
else
sphWarning ( "table '%s': %s", szIndexName, sWarning.cstr() );
}
// upgrading schema to store field lengths
if ( tSettings.m_bIndexFieldLens )
if ( !AddFieldLens ( tSchema, false, sError ) )
{
sphWarning ( "table '%s': failed to create field lengths attributes: %s", szIndexName, sError.cstr () );
return false;
}
if ( bWordDict && ( tSettings.m_dPrefixFields.GetLength () || tSettings.m_dInfixFields.GetLength () ) )
{
CSphString sWarning = "prefix_fields and infix_fields has no effect with dict=keywords, ignoring";
if ( pWarnings )
pWarnings->Add(sWarning);
else
sphWarning ( "table '%s': %s", szIndexName, sWarning.cstr() );
}
if ( bWordDict && tSettings.m_iMinInfixLen==1 )
{
CSphString sWarning = "min_infix_len must be greater than 1, changed to 2";
if ( pWarnings )
pWarnings->Add(sWarning);
else
sphWarning ( "table '%s': %s", szIndexName, sWarning.cstr() );
tSettings.m_iMinInfixLen = 2;
}
tSchema.SetupFlags ( tSettings, bPercolate, pWarnings );
return true;
}
///////////////////////////////////////////////
/// create, configure and load realtime index
///////////////////////////////////////////////
static ResultAndIndex_t LoadRTPercolate ( bool bRT, const char* szIndexName, const CSphConfigSection& hIndex, bool bMutableOpt, StrVec_t* pWarnings, CSphString& sError )
{
bool bWordDict = true;
if ( bRT )
{
auto sIndexType = hIndex.GetStr ( "dict", "keywords" );
bWordDict = true;
if ( sIndexType=="crc" )
bWordDict = false;
else if ( sIndexType!="keywords" )
{
sError.SetSprintf ( "table '%s': unknown dict=%s; only 'keywords' or 'crc' values allowed", szIndexName, sIndexType.cstr() );
return { ADD_ERROR, nullptr };
}
}
CSphSchema tSchema ( szIndexName );
CSphIndexSettings tSettings;
if ( !ConfigureRTPercolate ( tSchema, tSettings, szIndexName, hIndex, bWordDict, !bRT, pWarnings, sError ))
return { ADD_ERROR, nullptr };
// index
auto pServed = MakeServedIndex();
ConfigureLocalIndex ( pServed, hIndex, bMutableOpt, pWarnings );
pServed->m_sIndexPath = hIndex["path"].strval();
auto bNeedBinlog = hIndex.GetBool ( "binlog" );
std::unique_ptr<CSphIndex> pIdx;
if ( bRT )
{
pIdx = sphCreateIndexRT ( szIndexName, pServed->m_sIndexPath, std::move ( tSchema ), pServed->m_tSettings.m_iMemLimit, bWordDict );
pServed->m_eType = IndexType_e::RT;
tSettings.m_bBinlog = bNeedBinlog;
if ( !bNeedBinlog )
pIdx->m_iTID = -1;
} else
{
if ( !bNeedBinlog )
{
sError.SetSprintf ( "table '%s': percolate without binlog not implemented", szIndexName );
return { ADD_ERROR, nullptr };
}
pIdx = CreateIndexPercolate ( szIndexName, pServed->m_sIndexPath, std::move ( tSchema ) );
pServed->m_eType = IndexType_e::PERCOLATE;
}
pIdx->SetMutableSettings ( pServed->m_tSettings );
pIdx->m_iExpansionLimit = g_iExpansionLimit;
pIdx->SetGlobalIDFPath ( pServed->m_sGlobalIDFPath );
pIdx->Setup ( tSettings );
pIdx->SetCacheSize ( g_iMaxCachedDocs, g_iMaxCachedHits );
pServed->SetIdx ( std::move ( pIdx ) );
return ResultAndIndex_t { ADD_NEEDLOAD, std::move ( pServed ) }; // move to avoid an extra addref/release on copying
}
////////////////////////////////////////////
/// configure and load local index
////////////////////////////////////////////
static ResultAndIndex_t LoadPlainIndex ( const char * szIndexName, const CSphConfigSection & hIndex, bool bMutableOpt, StrVec_t * pWarnings, CSphString & sError )
{
// check path
if ( !hIndex.Exists ( "path" ) )
{
sError = "key 'path' not found";
return { ADD_ERROR, nullptr };
}
ServedIndexRefPtr_c pServed = MakeServedIndex();
pServed->m_eType = IndexType_e::PLAIN;
// configure local index settings (memlocking etc)
ConfigureLocalIndex ( pServed, hIndex, bMutableOpt, pWarnings );
// try to create index
pServed->m_sIndexPath = hIndex["path"].strval ();
auto pIdx = sphCreateIndexPhrase ( szIndexName, RedirectToRealPath ( pServed->m_sIndexPath ) );
pIdx->m_iExpansionLimit = g_iExpansionLimit;
pIdx->SetMutableSettings ( pServed->m_tSettings );
pIdx->SetGlobalIDFPath ( pServed->m_sGlobalIDFPath );
pIdx->SetCacheSize ( g_iMaxCachedDocs, g_iMaxCachedHits );
pServed->SetIdx ( std::move ( pIdx ) );
return ResultAndIndex_t { ADD_NEEDLOAD, std::move ( pServed ) };
}
///////////////////////////////////////////////
/// make and configure template index
///////////////////////////////////////////////
static ResultAndIndex_t LoadTemplateIndex ( const char * szIndexName, const CSphConfigSection &hIndex, bool bMutableOpt, StrVec_t * pWarnings )
{
CSphIndexSettings tSettings;
CSphString sWarning, sError;
if ( !tSettings.Setup ( hIndex, szIndexName, sWarning, sError ) )
{
sphWarning ( "failed to configure table %s: %s", szIndexName, sError.cstr () );
return { ADD_ERROR, nullptr };
}
if ( !sWarning.IsEmpty() )
sphWarning ( "table '%s': %s - NOT SERVING", szIndexName, sWarning.cstr () );
auto pIdx = sphCreateIndexTemplate ( szIndexName );
pIdx->Setup ( tSettings );
auto pServed = MakeServedIndex();
pServed->m_eType = IndexType_e::TEMPLATE;
// configure local index settings (memlocking etc)
ConfigureLocalIndex ( pServed, hIndex, bMutableOpt, pWarnings );
pIdx->SetMutableSettings ( pServed->m_tSettings );
pIdx->m_iExpansionLimit = g_iExpansionLimit;
std::unique_ptr<FilenameBuilder_i> pFilenameBuilder = CreateFilenameBuilder ( szIndexName );
StrVec_t dWarnings;
if ( !sphFixupIndexSettings ( pIdx.get(), hIndex, g_bStripPath, pFilenameBuilder.get(), dWarnings, sError ) )
{
sphWarning ( "table '%s': %s - NOT SERVING", szIndexName, sError.cstr () );
return { ADD_ERROR, nullptr };
}
for ( const auto & i : dWarnings )
sphWarning ( "table '%s': %s", szIndexName, i.cstr() );
// templates we either add or replace, depending on the requested action
// at this point they are production-ready
pServed->SetIdx ( std::move ( pIdx ) );
return ResultAndIndex_t { ADD_SERVED, std::move ( pServed ) };
}
// HandleCommandClusterPq() -> RemoteLoadIndex() -> LoadIndex() -> AddIndex() // only Percolate! From other threads
// HandleMysqlCreateTable() -> CreateNewIndexConfigless() -> AddIndex() // from other threads
// ServiceMain() -> TickHead() -> CheckRotate() -> ReloadConfigAndRotateIndexes() -> AddIndex()
// ConfigureAndPreloadIndex() -> AddIndex() // maybe from non-main thread!
// ClientSession_c::Execute -> HandleMysqlImportTable -> AddExistingIndexConfigless -> ConfiglessPreloadIndex -> ConfigureAndPreloadIndex -> AddIndex
ResultAndIndex_t AddIndex ( const char * szIndexName, const CSphConfigSection & hIndex, bool bCheckDupe, bool bMutableOpt, StrVec_t * pWarnings, CSphString & sError )
{
// check name
if ( bCheckDupe && IndexIsServed ( szIndexName ) )
{
sError = "duplicate name";
sphWarning ( "table '%s': duplicate name - NOT SERVING", szIndexName );
return { ADD_ERROR, nullptr };
}
switch ( TypeOfIndexConfig ( hIndex.GetStr ( "type", nullptr )))
{
case IndexType_e::DISTR:
return AddDistributedIndex ( szIndexName, hIndex, sError, pWarnings );
case IndexType_e::RT:
return LoadRTPercolate ( true, szIndexName, hIndex, bMutableOpt, pWarnings, sError );
case IndexType_e::PERCOLATE:
return LoadRTPercolate ( false, szIndexName, hIndex, bMutableOpt, pWarnings, sError );
case IndexType_e::TEMPLATE:
return LoadTemplateIndex ( szIndexName, hIndex, bMutableOpt, pWarnings );
case IndexType_e::PLAIN:
return LoadPlainIndex ( szIndexName, hIndex, bMutableOpt, pWarnings, sError );
case IndexType_e::ERROR_:
default:
break;
}
sphWarning ( "table '%s': unknown type '%s' - NOT SERVING", szIndexName, hIndex["type"].cstr() );
return { ADD_ERROR, nullptr };
}
// add or remove persistent pools to hosts
void InitPersistentPool()
{
if ( !g_iPersistentPoolSize )
{
ClosePersistentSockets();
return;
}
Dashboard::GetActiveHosts ().Apply ( [] ( HostDashboardRefPtr_t& pHost ) {
if ( !pHost->m_pPersPool )
pHost->m_pPersPool = new PersistentConnectionsPool_c;
pHost->m_pPersPool->ReInit ( g_iPersistentPoolSize );
} );
}
// special pass for 'simple' rotation (i.e. *.new to current)
static void IssuePlainOldRotation ( HashOfServed_c& hDeferred )
{
ConfigReloader_c tReloader { hDeferred };
tReloader.IssuePlainOldRotation();
}
// Reloading is always called from the same thread (so, for now, it needs no thread-safety of its own)
// ServiceMain() -> TickHead() -> CheckRotate() -> ReloadConfigAndRotateIndexes().
static void ReloadIndexesFromConfig ( const CSphConfig& hConf, HashOfServed_c& hDeferred ) REQUIRES ( MainThread )
{
assert ( !IsConfigless() );
if ( !hConf.Exists ("index") )
{
sphInfo ( "No tables found in config came to rotation. Abort reloading");
return;
}
ConfigReloader_c tReloader { hDeferred };
for ( const auto& dIndex : hConf["index"] )
{
const auto & sIndexName = dIndex.first;
const CSphConfigSection & hIndex = dIndex.second;
IndexType_e eNewType = TypeOfIndexConfig ( hIndex.GetStr ( "type", nullptr ) );
if ( eNewType==IndexType_e::ERROR_ )
continue;
tReloader.LoadIndexFromConfig ( sIndexName, eNewType, hIndex );
}
InitPersistentPool();
}
struct IndexWithPriority_t
{
CSphString m_sIndex;
StrVec_t m_dKilllistTargets;
int m_iPriority {-1};
int m_nReferences {0};
};
static void SetIndexPriority ( IndexWithPriority_t & tIndex, int iPriority, const SmallStringHash_T<IndexWithPriority_t> & tIndexHash )
{
tIndex.m_iPriority = Max ( tIndex.m_iPriority, iPriority );
for ( const auto & i : tIndex.m_dKilllistTargets )
{
IndexWithPriority_t * pIdx = tIndexHash(i);
if ( pIdx )
SetIndexPriority ( *pIdx, iPriority+1, tIndexHash );
}
}
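// e.g. if 'delta' names 'main' in its killlist_target, 'delta' is referenced by nobody and
// gets priority 0, while 'main' gets 1; the ascending sort below then rotates 'delta' before
// 'main', so delta's kill-list is in place by the time 'main' is processed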
static VecOfServed_c ConvertHashToPrioritySortedVec ( const HashOfServed_c& hDeferredIndexes ) REQUIRES ( MainThread )
{
SmallStringHash_T<IndexWithPriority_t> tIndexesToRotate;
VecOfServed_c dResult;
for ( const auto& it : hDeferredIndexes )
{
assert ( it.second );
// check for rt/percolate. they don't need killlist_target
if ( !ServedDesc_t::IsMutable ( it.second ) && g_pLocalIndexes->Contains ( it.first ) )
{
IndexWithPriority_t tToRotate;
tToRotate.m_sIndex = it.first;
tToRotate.m_dKilllistTargets = it.second->m_dKilllistTargets;
tIndexesToRotate.Add ( std::move ( tToRotate ), it.first );
}
else
dResult.Add ( { it.first, it.second } ); // out of priority - will be processed first.
};
// set priorities
for ( const auto& tIndexToRotate : tIndexesToRotate )
for ( const auto & i : tIndexToRotate.second.m_dKilllistTargets )
{
IndexWithPriority_t * pIdx = tIndexesToRotate(i);
if ( pIdx )
++pIdx->m_nReferences;
}
// start with the least-referenced index
IndexWithPriority_t * pMin;
do
{
pMin = nullptr;
for ( auto & tIndexToRotate : tIndexesToRotate )
{
auto & tIdx = tIndexToRotate.second;
if ( tIdx.m_iPriority==-1 && ( !pMin || tIdx.m_nReferences<pMin->m_nReferences ) )
pMin = &tIdx;
}
if ( pMin )
SetIndexPriority ( *pMin, 0, tIndexesToRotate );
}
while ( pMin );
// collect the processed index names and sort them by priority
StrVec_t dSorted;
for ( auto& tIndexToRotate : tIndexesToRotate )
dSorted.Add ( tIndexToRotate.first );
dSorted.Sort ( Lesser ( [&tIndexesToRotate] ( auto a, auto b ) { return tIndexesToRotate[a].m_iPriority < tIndexesToRotate[b].m_iPriority; } ) );
// append the prioritized names after the non-prioritized ones
for ( const auto& sIdx : dSorted )
dResult.Add ( { sIdx, hDeferredIndexes[sIdx] } );
return dResult;
}
// ServiceMain() -> TickHead() -> CheckRotate() -> CheckIndexesForSeamlessAndStartRotation()
static void CheckIndexesForSeamlessAndStartRotation ( VecOfServed_c dDeferredIndexes ) REQUIRES ( MainThread )
{
// check what indexes need to be rotated
int iNotCapableForSeamlessRotation = 0;
ARRAY_FOREACH ( i, dDeferredIndexes )
{
const auto& sIdx = dDeferredIndexes[i].first;
auto* pIndex = dDeferredIndexes[i].second.Ptr();
assert ( pIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) && CheckIndexRotate_c ( *pIndex, CheckIndexRotate_c::CheckLink ).NothingToRotate() )
{
++iNotCapableForSeamlessRotation;
sphLogDebug ( "queue[] = %s", sIdx.cstr() );
sphLogDebug ( "Index %s (%s) is not capable for seamless rotate. Skipping", sIdx.cstr ()
, pIndex->m_sIndexPath.cstr () );
dDeferredIndexes.Remove(i--);
}
}
if ( iNotCapableForSeamlessRotation )
sphWarning ( "internal error: non-empty queue on a rotation cycle start, got %d elements", iNotCapableForSeamlessRotation );
if ( dDeferredIndexes.IsEmpty () )
{
sphInfo ( "nothing to rotate after SIGHUP" );
g_bInRotate = false;
return;
}
InvokeRotation ( std::move ( dDeferredIndexes ) );
}
// dDeferredIndexes includes fresh new, changed, and 'just .new to current' tables.
static void DoGreedyRotation ( VecOfServed_c&& dDeferredIndexes ) REQUIRES ( MainThread )
{
assert ( !g_bSeamlessRotate );
ScRL_t tRotateConfigMutex { g_tRotateConfigMutex };
for ( auto& dDeferredIndex : dDeferredIndexes )
{
const CSphString& sDeferredIndex = dDeferredIndex.first;
ServedIndexRefPtr_c& pDeferredIndex = dDeferredIndex.second;
assert ( pDeferredIndex );
CSphString sError;
StrVec_t dWarnings;
// prealloc RT and percolate here
if ( ServedDesc_t::IsMutable ( pDeferredIndex ) )
{
sphLogDebug ( "greedy rotate (prealloc) mutable %s", sDeferredIndex.cstr() );
if ( PreallocNewIndex ( *pDeferredIndex, &g_hCfg["index"][sDeferredIndex], sDeferredIndex.cstr(), dWarnings, sError ) )
g_pLocalIndexes->AddOrReplace ( pDeferredIndex, sDeferredIndex );
else
sphWarning ( "table '%s': %s - NOT SERVING", sDeferredIndex.cstr(), sError.cstr() );
}
else if ( pDeferredIndex->m_eType==IndexType_e::PLAIN )
{
sphLogDebug ( "greedy rotate local %s", sDeferredIndex.cstr() );
auto pRotating = GetServed ( sDeferredIndex );
bool bSame = pRotating && pRotating.Ptr() == pDeferredIndex.Ptr();
WIdx_c WIdx { pDeferredIndex };
bool bOk = RotateIndexGreedy ( *pDeferredIndex, sDeferredIndex.cstr(), sError );
if ( !bOk )
sphWarning ( "table '%s': %s - NOT SERVING", sDeferredIndex.cstr(), sError.cstr() );
if ( !bSame && bOk && !sphFixupIndexSettings ( WIdx, g_hCfg["index"][sDeferredIndex], g_bStripPath, nullptr, dWarnings, sError ) )
{
sphWarning ( "table '%s': %s - NOT SERVING", sDeferredIndex.cstr(), sError.cstr() );
bOk = false;
}
if ( bOk )
{
WIdx->Preread();
pDeferredIndex->UpdateMass();
g_pLocalIndexes->AddOrReplace ( pDeferredIndex, sDeferredIndex );
}
}
for ( const auto & i : dWarnings )
sphWarning ( "table '%s': %s", sDeferredIndex.cstr(), i.cstr() );
g_pDistIndexes->Delete ( sDeferredIndex ); // postponed delete of same-named distributed (if any)
}
// assert ( dDeferredIndexes.IsEmpty() );
g_bInRotate = false;
RotateGlobalIdf ();
sphInfo ( "rotating finished" );
}
// ServiceMain() -> TickHead() -> [CallCoroutine] -> CheckRotate()
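// overall flow: re-read the config if it changed, collect the tables that need reloading
// into hDeferredIndexes, order them so that kill-list sources precede their targets, and
// hand the batch to either the seamless (background) or the greedy (blocking) rotation path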
static void CheckRotate () REQUIRES ( MainThread )
{
// do we need to rotate now? if no SIGHUP was received, or if we are already rotating - no.
// (that check now lives in the caller, TickHead)
// if ( !g_bNeedRotate || g_bInRotate || IsConfigless() )
// return;
assert ( !IsConfigless() );
g_bInRotate = true; // ok, another rotation cycle just started
g_bNeedRotate = false; // which therefore clears any previous HUP signals
sphLogDebug ( "CheckRotate invoked" );
bool bReloadHappened = false;
HashOfServed_c hDeferredIndexes;
{
auto [bChanged, dConfig] = FetchAndCheckIfChanged ( g_sConfigFile );
if ( bChanged || g_bReloadForced )
{
sphInfo( "Config changed (read %d chars)", dConfig.GetLength());
if ( !dConfig.IsEmpty() )
{
{
ScWL_t dRotateConfigMutexWlocked { g_tRotateConfigMutex };
bReloadHappened = ParseConfig ( &g_hCfg, g_sConfigFile, dConfig );
}
if ( bReloadHappened )
{
ScRL_t dRotateConfigMutexRlocked { g_tRotateConfigMutex };
ReloadIndexesFromConfig ( g_hCfg, hDeferredIndexes );
} else
sphWarning ( "failed to parse config file '%s': %s; using previous settings", g_sConfigFile.cstr(), TlsMsg::szError() );
}
}
g_bReloadForced = false;
}
if ( !bReloadHappened )
IssuePlainOldRotation ( hDeferredIndexes );
VecOfServed_c dDeferredIndexes = ConvertHashToPrioritySortedVec ( hDeferredIndexes );
for ( const auto& s : dDeferredIndexes )
sphLogDebug ( "will rotate %s", s.first.cstr() );
if ( g_bSeamlessRotate )
CheckIndexesForSeamlessAndStartRotation ( std::move ( dDeferredIndexes ) );
else
DoGreedyRotation ( std::move ( dDeferredIndexes ) );
}
void CheckReopenLogs () REQUIRES ( MainThread )
{
if ( !g_bGotSigusr1 )
return;
// reopen searchd log
if ( g_iLogFile>=0 && !g_bLogTty )
{
int iFD = ::open ( g_sLogFile.cstr(), O_CREAT | O_RDWR | O_APPEND, S_IREAD | S_IWRITE );
if ( iFD<0 )
{
sphWarning ( "failed to reopen log file '%s': %s", g_sLogFile.cstr(), strerrorm(errno) );
} else
{
::close ( g_iLogFile );
g_iLogFile = iFD;
g_bLogTty = ( isatty ( g_iLogFile )!=0 );
LogChangeMode ( g_iLogFile, g_iLogFileMode );
sphInfo ( "log reopened" );
}
}
// reopen query log
if ( !g_bQuerySyslog && g_iQueryLogFile!=g_iLogFile && g_iQueryLogFile>=0 && !isatty ( g_iQueryLogFile ) )
{
int iFD = ::open ( g_sQueryLogFile.cstr(), O_CREAT | O_RDWR | O_APPEND, S_IREAD | S_IWRITE );
if ( iFD<0 )
{
sphWarning ( "failed to reopen query log file '%s': %s", g_sQueryLogFile.cstr(), strerrorm(errno) );
} else
{
::close ( g_iQueryLogFile );
g_iQueryLogFile = iFD;
LogChangeMode ( g_iQueryLogFile, g_iLogFileMode );
sphInfo ( "query log reopened" );
}
}
g_bGotSigusr1 = 0;
}
#if !_WIN32
#define WINAPI
#else
SERVICE_STATUS g_ss;
SERVICE_STATUS_HANDLE g_ssHandle;
void MySetServiceStatus ( DWORD dwCurrentState, DWORD dwWin32ExitCode, DWORD dwWaitHint )
{
static DWORD dwCheckPoint = 1;
if ( dwCurrentState==SERVICE_START_PENDING )
g_ss.dwControlsAccepted = 0;
else
g_ss.dwControlsAccepted = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN;
g_ss.dwCurrentState = dwCurrentState;
g_ss.dwWin32ExitCode = dwWin32ExitCode;
g_ss.dwWaitHint = dwWaitHint;
if ( dwCurrentState==SERVICE_RUNNING || dwCurrentState==SERVICE_STOPPED )
g_ss.dwCheckPoint = 0;
else
g_ss.dwCheckPoint = dwCheckPoint++;
SetServiceStatus ( g_ssHandle, &g_ss );
}
void WINAPI ServiceControl ( DWORD dwControlCode )
{
switch ( dwControlCode )
{
case SERVICE_CONTROL_STOP:
case SERVICE_CONTROL_SHUTDOWN:
MySetServiceStatus ( SERVICE_STOP_PENDING, NO_ERROR, 0 );
g_bServiceStop = true;
break;
default:
MySetServiceStatus ( g_ss.dwCurrentState, NO_ERROR, 0 );
break;
}
}
// warning! static buffer, non-reentrant
const char * WinErrorInfo ()
{
static char sBuf[1024];
DWORD uErr = ::GetLastError ();
snprintf ( sBuf, sizeof(sBuf), "code=%lu, error=", uErr );
auto iLen = (int) strlen(sBuf);
if ( !FormatMessage ( FORMAT_MESSAGE_FROM_SYSTEM, NULL, uErr, 0, sBuf+iLen, sizeof(sBuf)-iLen, NULL ) ) // FIXME? force US-english langid?
snprintf ( sBuf+iLen, sizeof(sBuf)-iLen, "(no message)" );
return sBuf;
}
SC_HANDLE ServiceOpenManager ()
{
SC_HANDLE hSCM = OpenSCManager (
NULL, // local computer
NULL, // ServicesActive database
SC_MANAGER_ALL_ACCESS ); // full access rights
if ( hSCM==NULL )
sphFatal ( "OpenSCManager() failed: %s", WinErrorInfo() );
return hSCM;
}
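// appends a command-line argument to the buffer, space-separated; arguments containing
// spaces or quotes get quoted, with inner quotes escaped: `a b` -> ` "a b"`, `x"y` -> ` "x\"y"`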
void AppendArg ( char * sBuf, int iBufLimit, const char * sArg )
{
char * sBufMax = sBuf + iBufLimit - 2; // reserve place for opening space and trailing zero
sBuf += strlen(sBuf);
if ( sBuf>=sBufMax )
return;
auto iArgLen = (int) strlen(sArg);
bool bQuote = false;
for ( int i=0; i<iArgLen && !bQuote; i++ )
if ( sArg[i]==' ' || sArg[i]=='"' )
bQuote = true;
*sBuf++ = ' ';
if ( !bQuote )
{
// just copy
int iToCopy = Min ( sBufMax-sBuf, iArgLen );
memcpy ( sBuf, sArg, iToCopy );
sBuf[iToCopy] = '\0';
} else
{
// quote
sBufMax -= 2; // reserve place for quotes
if ( sBuf>=sBufMax )
return;
*sBuf++ = '"';
while ( sBuf<sBufMax && *sArg )
{
if ( *sArg=='"' )
{
// quote
if ( sBuf<sBufMax-1 )
{
*sBuf++ = '\\';
*sBuf++ = *sArg++;
}
} else
{
// copy
*sBuf++ = *sArg++;
}
}
*sBuf++ = '"';
*sBuf++ = '\0';
}
}
void ServiceInstall ( int argc, char ** argv )
{
if ( g_bService )
return;
sphInfo ( "Installing service..." );
char szBinary[MAX_PATH];
if ( !GetModuleFileName ( NULL, szBinary, MAX_PATH ) )
sphFatal ( "GetModuleFileName() failed: %s", WinErrorInfo() );
char szPath[MAX_PATH];
szPath[0] = '\0';
AppendArg ( szPath, sizeof(szPath), szBinary );
AppendArg ( szPath, sizeof(szPath), "--ntservice" );
for ( int i=1; i<argc; i++ )
if ( strcmp ( argv[i], "--install" ) )
AppendArg ( szPath, sizeof(szPath), argv[i] );
SC_HANDLE hSCM = ServiceOpenManager ();
SC_HANDLE hService = CreateService (
hSCM, // SCM database
g_sServiceName, // name of service
g_sServiceName, // service name to display
SERVICE_ALL_ACCESS, // desired access
SERVICE_WIN32_OWN_PROCESS, // service type
SERVICE_AUTO_START, // start type
SERVICE_ERROR_NORMAL, // error control type
szPath+1, // path to service's binary
NULL, // no load ordering group
NULL, // no tag identifier
NULL, // no dependencies
NULL, // LocalSystem account
NULL ); // no password
if ( !hService )
{
CloseServiceHandle ( hSCM );
sphFatal ( "CreateService() failed: %s", WinErrorInfo() );
} else
{
sphInfo ( "Service '%s' installed successfully.", g_sServiceName );
}
CSphString sDesc;
sDesc.SetSprintf ( "%s-%s", g_sServiceName, g_sStatusVersion.cstr() );
SERVICE_DESCRIPTION tDesc;
tDesc.lpDescription = (LPSTR) sDesc.cstr();
if ( !ChangeServiceConfig2 ( hService, SERVICE_CONFIG_DESCRIPTION, &tDesc ) )
sphWarning ( "failed to set service description" );
CloseServiceHandle ( hService );
CloseServiceHandle ( hSCM );
}
void ServiceDelete ()
{
if ( g_bService )
return;
sphInfo ( "Deleting service..." );
// open manager
SC_HANDLE hSCM = ServiceOpenManager ();
// open service
SC_HANDLE hService = OpenService ( hSCM, g_sServiceName, DELETE );
if ( !hService )
{
CloseServiceHandle ( hSCM );
sphFatal ( "OpenService() failed: %s", WinErrorInfo() );
}
// do delete
bool bRes = !!DeleteService ( hService );
CloseServiceHandle ( hService );
CloseServiceHandle ( hSCM );
if ( !bRes )
sphFatal ( "DeleteService() failed: %s", WinErrorInfo() );
else
sphInfo ( "Service '%s' deleted successfully.", g_sServiceName );
}
#endif // _WIN32
void ShowHelp ()
{
fprintf ( stdout,
"Usage: searchd [OPTIONS]\n"
"\n"
"Options are:\n"
"-h, --help\t\tdisplay this help message\n"
"-v, --version\t\tdisplay version information\n"
"-c, --config <file>\tread configuration from specified file\n"
"\t\t\t(default is manticore.conf)\n"
"--stop\t\t\tsend SIGTERM to currently running searchd\n"
"--stopwait\t\tsend SIGTERM and wait until actual exit\n"
"--status\t\tget ant print status variables\n"
"\t\t\t(PID is taken from pid_file specified in config file)\n"
"--iostats\t\tlog per-query io stats\n"
"--cpustats\t\tlog per-query cpu stats\n"
#if _WIN32
"--install\t\tinstall as Windows service\n"
"--delete\t\tdelete Windows service\n"
"--servicename <name>\tuse given service name (default is 'searchd')\n"
"--ntservice\t\tinternal option used to invoke a Windows service\n"
#endif
"--strip-path\t\tstrip paths from stopwords, wordforms, exceptions\n"
"\t\t\tand other file names stored in the table header\n"
"--replay-flags=<OPTIONS>\n"
"\t\t\textra binary log replay options (current options \n"
"\t\t\tare 'accept-desc-timestamp' and 'ignore-open-errors')\n"
"--new-cluster\tbootstraps a replication cluster with cluster restart protection\n"
"--new-cluster-force\tbootstraps a replication cluster without cluster restart protection\n"
"\n"
"Debugging options are:\n"
"--console\t\trun in console mode (do not fork, do not log to files)\n"
"-p, --port <port>\tlisten on given port (overrides config setting)\n"
"-l, --listen <spec>\tlisten on given address, port or path (overrides\n"
"\t\t\tconfig settings)\n"
"-i, --index <index>\tonly serve given table(s)\n"
"-t, --table <table>\tonly serve given table(s)\n"
#if !_WIN32
"--nodetach\t\tdo not detach into background\n"
#endif
"--logdebug, --logdebugv, --logdebugvv\n"
"\t\t\tenable additional debug information logging\n"
"\t\t\t(with different verboseness)\n"
"--pidfile\t\tforce using the PID file (useful with --console)\n"
"--safetrace\t\tonly use system backtrace() call in crash reports\n"
"--coredump\t\tsave core dump file on crash\n"
"\n"
"Examples:\n"
"searchd --config /usr/local/sphinx/etc/manticore.conf\n"
#if _WIN32
"searchd --install --config c:\\sphinx\\manticore.conf\n"
#endif
);
}
void InitSharedBuffer ()
{
static CSphLargeBuffer<SharedData_t, true> g_dShared;
CSphString sError;
if ( !g_dShared.Alloc ( 1, sError ) )
sphDie ( "failed to allocate shared buffer (msg=%s)", sError.cstr() );
// reset
g_pShared = g_dShared.GetWritePtr();
g_pShared->m_bDaemonAtShutdown = false;
g_pShared->m_bHaveTTY = false;
}
#if _WIN32
BOOL WINAPI CtrlHandler ( DWORD )
{
if ( !g_bService )
sphInterruptNow();
return TRUE;
}
#endif
#if !_WIN32
static char g_sNameBuf[512] = { 0 };
static char g_sPid[30] = { 0 };
// returns 'true' only once - at the very start, while std streams are still attached - so startup output can be shown properly.
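// classic double-fork daemonization: fork #1 plus setsid() detaches us from the controlling
// terminal, fork #2 drops session leadership so a terminal can never be reacquired; the
// surviving middle process then stays around as the watchdog, forking the actual daemon
// below and restarting it whenever it crashes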
bool SetWatchDog ( int iDevNull ) REQUIRES ( MainThread )
{
InitSharedBuffer ();
// Fork #1 - detach from controlling terminal
switch ( fork() )
{
case -1:
// error
sphFatalLog ( "fork() failed (reason: %s)", strerrorm ( errno ) );
exit ( 1 );
case 0:
// daemonized child - or new and free watchdog :)
break;
default:
// tty-controlled parent
while ( !g_pShared->m_bHaveTTY )
sphSleepMsec ( 100 );
exit ( 0 );
}
// become the session leader
if ( setsid()==-1 )
{
sphFatalLog ( "setsid() failed (reason: %s)", strerrorm ( errno ) );
exit ( 1 );
}
// Fork #2 - detach from session leadership (may not be strictly necessary, though)
switch ( fork() )
{
case -1:
// error
sphFatalLog ( "fork() failed (reason: %s)", strerrorm ( errno ) );
exit ( 1 );
case 0:
// daemonized child - or new and free watchdog :)
break;
default:
// tty-controlled parent
exit ( 0 );
}
// save path to our binary (on readlink() failure the buffer stays zero-initialized)
auto iNameLen = ::readlink ( "/proc/self/exe", g_sNameBuf, 511 );
if ( iNameLen>0 )
g_sNameBuf[iNameLen] = 0;
// now we are the watchdog. Let us fork the actual process
enum class EFork { Startup, Disabled, Restart } eReincarnate = EFork::Startup;
bool bShutdown = false;
bool bStreamsActive = true;
int iChild = 0;
g_iParentPID = getpid();
assert ( g_pShared );
while (true)
{
if ( eReincarnate!=EFork::Disabled )
iChild = fork();
if ( iChild==-1 )
{
sphFatalLog ( "fork() failed during watchdog setup (error=%s)", strerrorm(errno) );
exit ( 1 );
}
// child process; the returned flag tells the caller whether std streams are still attached (true only on the very first start)
if ( iChild==0 )
{
atexit ( &ReleaseTTYFlag );
return bStreamsActive;
}
// parent process, watchdog
// close the io files
if ( bStreamsActive )
{
close ( STDIN_FILENO );
close ( STDOUT_FILENO );
close ( STDERR_FILENO );
dup2 ( iDevNull, STDIN_FILENO );
dup2 ( iDevNull, STDOUT_FILENO );
dup2 ( iDevNull, STDERR_FILENO );
bStreamsActive = false;
}
if ( eReincarnate!=EFork::Disabled )
{
sphInfo ( "watchdog: main process %d forked ok", iChild );
snprintf ( g_sPid, sizeof(g_sPid), "%d", iChild);
}
SetSignalHandlers();
eReincarnate = EFork::Disabled;
int iPid, iStatus;
while ( ( iPid = wait ( &iStatus ) )>0 )
{
const char * sWillRestart = ( g_pShared->m_bDaemonAtShutdown ? "will not be restarted (daemon is shutting down)" : "will be restarted" );
assert ( iPid==iChild );
if ( WIFEXITED ( iStatus ) )
{
int iExit = WEXITSTATUS ( iStatus );
if ( iExit==2 || iExit==6 ) // really crash
{
sphInfo ( "watchdog: main process %d crashed via CRASH_EXIT (exit code %d), %s", iPid, iExit, sWillRestart );
eReincarnate = EFork::Restart;
} else
{
sphInfo ( "watchdog: main process %d exited cleanly (exit code %d), shutting down", iPid, iExit );
bShutdown = true;
}
} else if ( WIFSIGNALED ( iStatus ) )
{
int iSig = WTERMSIG ( iStatus );
const char * sSig = NULL;
if ( iSig==SIGINT )
sSig = "SIGINT";
else if ( iSig==SIGTERM )
sSig = "SIGTERM";
else if ( WATCHDOG_SIGKILL && iSig==SIGKILL )
sSig = "SIGKILL";
if ( sSig )
{
sphInfo ( "watchdog: main process %d killed cleanly with %s, shutting down", iPid, sSig );
bShutdown = true;
} else
{
if ( WCOREDUMP ( iStatus ) )
sphInfo ( "watchdog: main process %d killed dirtily with signal %d, core dumped, %s",
iPid, iSig, sWillRestart );
else
sphInfo ( "watchdog: main process %d killed dirtily with signal %d, %s",
iPid, iSig, sWillRestart );
eReincarnate = EFork::Restart;
}
} else if ( WIFSTOPPED ( iStatus ) )
sphInfo ( "watchdog: main process %d stopped with signal %d", iPid, WSTOPSIG ( iStatus ) );
#ifdef WIFCONTINUED
else if ( WIFCONTINUED ( iStatus ) )
sphInfo ( "watchdog: main process %d resumed", iPid );
#endif
}
if ( iPid==-1 )
{
if ( g_bGotSigusr2 )
{
g_bGotSigusr2 = 0;
sphInfo ( "watchdog: got USR2, performing dump of child's stack" );
sphDumpGdb ( g_iLogFile, g_sNameBuf, g_sPid );
}
}
if ( bShutdown || sphInterrupted() || g_pShared->m_bDaemonAtShutdown )
{
exit ( 0 );
}
}
}
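// To summarize the watchdog policy above: a clean exit or SIGINT/SIGTERM(/SIGKILL)
// shuts everything down; CRASH_EXIT codes (2, 6) and dirty signals trigger a
// re-fork of the main process, unless a daemon shutdown is already in progress.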
#else
const int WIN32_PIPE_BUFSIZE=32;
#endif // !_WIN32
/// check for incoming signals, and react on them
void CheckSignals () REQUIRES ( MainThread )
{
#if _WIN32
if ( g_bService && g_bServiceStop )
{
Shutdown ();
MySetServiceStatus ( SERVICE_STOPPED, NO_ERROR, 0 );
exit ( 0 );
}
#endif
if ( g_bGotSighup )
{
sphInfo ( "caught SIGHUP (seamless=%d, in_rotate=%d, need_rotate=%d)", (int)g_bSeamlessRotate, (int)g_bInRotate, (int)g_bNeedRotate );
g_bNeedRotate = true;
g_bGotSighup = false;
}
if ( sphInterrupted() )
{
sphInfo ( "caught SIGTERM, shutting down" );
Shutdown ();
exit ( 0 );
}
#if _WIN32
BYTE dPipeInBuf [ WIN32_PIPE_BUFSIZE ];
DWORD nBytesRead = 0;
BOOL bSuccess = ReadFile ( g_hPipe, dPipeInBuf, WIN32_PIPE_BUFSIZE, &nBytesRead, NULL );
if ( nBytesRead > 0 && bSuccess )
{
for ( DWORD i=0; i<nBytesRead; i++ )
{
switch ( dPipeInBuf[i] )
{
case 0:
g_bGotSighup = 1;
break;
case 1:
sphInterruptNow();
if ( g_bService )
g_bServiceStop = true;
break;
}
}
DisconnectNamedPipe ( g_hPipe );
ConnectNamedPipe ( g_hPipe, NULL );
}
#endif
}
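// On Windows the named pipe above stands in for POSIX signals: a 0 byte requests
// a reload (the SIGHUP path) and a 1 byte requests shutdown; StopOrStopWaitAnother()
// below writes that 1 byte when handling '--stop'.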
static bool g_bLoadInfo = val_from_env ( "MANTICORE_TRACE_LOAD", false );
void TickHead () REQUIRES ( MainThread )
{
CheckSignals ();
CheckLeaks ();
CheckReopenLogs ();
if ( g_bNeedRotate && !g_bInRotate && !IsConfigless() )
Threads::CallCoroutine ( [] {
ScopedRole_c thMain ( MainThread );
CheckRotate();
} );
sphInfo ( nullptr ); // flush dupes
#if _WIN32
// on Windows there are no signals that interrupt sleep,
// so sleep less to keep the main loop more responsive
int tmSleep = 100;
#else
int tmSleep = 500;
#endif
sphSleepMsec ( tmSleep );
if ( sphMicroTimer() > g_iNextExpMeterTimestamp )
{
g_iNextExpMeterTimestamp += g_iExpMeterPeriod;
auto tSample = GlobalWorkPool()->Tasks();
auto tCurrent = GlobalWorkPool()->CurTasks();
g_tPriStat1m.Tick ( tSample.iPri );
g_tPriStat5m.Tick ( tSample.iPri );
g_tPriStat15m.Tick ( tSample.iPri );
g_tSecStat1m.Tick ( tSample.iSec );
g_tSecStat5m.Tick ( tSample.iSec );
g_tSecStat15m.Tick ( tSample.iSec );
g_tStat1m.Tick ( tSample.iPri + tSample.iSec + tCurrent);
g_tStat5m.Tick ( tSample.iPri + tSample.iSec + tCurrent);
g_tStat15m.Tick ( tSample.iPri + tSample.iSec + tCurrent);
if ( g_bLoadInfo )
sphInfo("Sample: %d, %d, %d; Load average: %0.2f, %0.2f, %0.2f, sec: %0.2f, %0.2f, %0.2f, pri: %0.2f, %0.2f, %0.2f", tCurrent, tSample.iSec, tSample.iPri, g_tStat1m.Value(), g_tStat5m.Value(), g_tStat15m.Value(), g_tSecStat1m.Value(), g_tSecStat5m.Value(), g_tSecStat15m.Value(), g_tPriStat1m.Value(), g_tPriStat5m.Value(), g_tPriStat15m.Value());
}
}
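// The 1m/5m/15m meters above mimic classic load averages: each Tick() feeds the
// sampled queue depth into a moving average over the corresponding window
// (presumably exponentially smoothed, per the g_iExpMeterPeriod naming).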
bool g_bVtune = false;
int64_t g_tmStarted = 0;
static int g_iNetWorkers = 1;
/////////////////////////////////////////////////////////////////////////////
// DAEMON OPTIONS
/////////////////////////////////////////////////////////////////////////////
static std::unique_ptr<QueryParser_i> PercolateQueryParserFactory ( bool bJson )
{
if ( bJson )
return sphCreateJsonQueryParser();
else
return sphCreatePlainQueryParser();
}
static void ParsePredictedTimeCosts ( const char * p )
{
// yet another mini-parser!
// ident=value [, ident=value [...]]
sph::ParseKeyValues ( p, [] (CSphString&& sIdent, CSphString&& sValue)
{
// bind value
if ( sIdent=="skip" )
g_iPredictorCostSkip = atoi ( sValue.cstr ());
else if ( sIdent=="doc" )
g_iPredictorCostDoc = atoi ( sValue.cstr ());
else if ( sIdent=="hit" )
g_iPredictorCostHit = atoi ( sValue.cstr ());
else if ( sIdent=="match" )
g_iPredictorCostMatch = atoi ( sValue.cstr ());
else
sphDie ( "predicted_time_costs: unknown identifier '%s' (known ones are skip, doc, hit, match)",
sIdent.cstr ());
});
}
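// An illustrative config line this mini-parser accepts (the numbers here are
// hypothetical, not recommended defaults):
//   predicted_time_costs = doc=64, hit=48, skip=2048, match=64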
// read system TFO settings and init g_iTFO accordingly.
/* From https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
* possible bitmask values are:
0x1: (client) enables sending data in the opening SYN on the client.
0x2: (server) enables the server support, i.e., allowing data in
a SYN packet to be accepted and passed to the
application before 3-way handshake finishes.
0x4: (client) send data in the opening SYN regardless of cookie
availability and without a cookie option.
0x200: (server) accept data-in-SYN w/o any cookie option present.
0x400: (server) enable all listeners to support Fast Open by
default without explicit TCP_FASTOPEN socket option.
Actually we are interested only in the first 2 bits.
*/
static void CheckSystemTFO ()
{
#if defined (MSG_FASTOPEN)
char sBuf[20] = { 0 };
g_iTFO = TFO_ABSENT;
FILE * fp = fopen ( "/proc/sys/net/ipv4/tcp_fastopen", "rb" );
if ( !fp )
{
sphInfo ( "TCP fast open unavailable (can't read /proc/sys/net/ipv4/tcp_fastopen, look Server_settings/Searchd#Technical-details-about-Sphinx-API-protocol-and-TFO in manual)" );
return;
}
auto szResult = fgets ( sBuf, 20, fp );
fclose ( fp );
if ( !szResult )
return;
g_iTFO = atoi ( szResult );
#else
g_iTFO = 3; // assume it is available.
#endif
}
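// Example: after "echo 3 > /proc/sys/net/ipv4/tcp_fastopen" the file reads "3",
// so g_iTFO becomes 3 (client and server TFO both enabled); builds without
// MSG_FASTOPEN optimistically assume the same.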
static void ConfigureDaemonLog ( const CSphString & sMode )
{
if ( sMode.IsEmpty() )
return;
StrVec_t dOpts = sphSplit ( sMode.cstr(), "," );
SmallStringHash_T<int> hStmt;
for ( int i=0; i<(int)( sizeof(g_dSqlStmts)/sizeof(g_dSqlStmts[0]) ); i++ )
hStmt.Add ( i, g_dSqlStmts[i] );
CSphBitvec tLogStatements ( STMT_TOTAL );
StringBuilder_c sWrongModes ( "," );
for ( const CSphString & sOpt : dOpts )
{
if ( sOpt=="0" ) // emplicitly disable all statements
return;
if ( sOpt=="1" || sOpt=="*" ) // enable all statements
{
tLogStatements.Set();
g_tLogStatements = tLogStatements;
return;
}
// check for whole statement enumerated
int * pMode = hStmt ( sOpt );
if ( pMode )
{
tLogStatements.BitSet ( *pMode );
continue;
}
bool bHasWild = false;
for ( const char * s = sOpt.cstr(); *s && !bHasWild; s++ )
bHasWild = sphIsWild ( *s );
if ( bHasWild )
{
bool bMatched = false;
for ( int i=0; i<(int)( sizeof(g_dSqlStmts)/sizeof(g_dSqlStmts[0]) ); i++ )
{
if ( sphWildcardMatch ( g_dSqlStmts[i], sOpt.cstr() ) )
{
tLogStatements.BitSet ( i );
bMatched = true;
break;
}
}
if ( bMatched )
continue;
}
sWrongModes += sOpt.cstr();
}
if ( tLogStatements.BitCount() )
g_tLogStatements = tLogStatements;
if ( !sWrongModes.IsEmpty() )
sphWarning ( "query_log_statements invalid values: %s", sWrongModes.cstr() );
}
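// Illustrative values for the option parsed here (wired up below from the
// 'query_log_commands' config key; statement names are examples):
//   0          - disable statement logging entirely
//   1 or *     - enable all statements
//   select,set - explicit comma-separated list
//   show_*     - wildcards match the first fitting statement name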
static void SetOptionSI ( const CSphConfigSection & hSearchd, bool bTestMode )
{
SIDefault_e eState = GetSecondaryIndexDefault();
if ( bTestMode )
eState = SIDefault_e::DISABLED;
CSphVariant * pOption = hSearchd ( "secondary_indexes" );
if ( pOption )
{
if ( pOption->strval()=="force" )
eState = SIDefault_e::FORCE;
else if ( pOption->intval()==0 )
eState = SIDefault_e::DISABLED;
else
eState = SIDefault_e::ENABLED;
if ( eState != SIDefault_e::DISABLED && !IsSecondaryLibLoaded() )
sphWarning ( "secondary_indexes set but failed to initialize secondary library: %s", g_sSecondaryError.cstr() );
}
SetSecondaryIndexDefault ( eState );
}
static void ConfigureMerge ( const CSphConfigSection & hSearchd )
{
BuildBufferSettings_t tSettings;
tSettings.m_iBufferAttributes = hSearchd.GetSize ( "merge_buffer_attributes", tSettings.m_iBufferAttributes );
tSettings.m_iBufferColumnar = hSearchd.GetSize ( "merge_buffer_columnar", tSettings.m_iBufferColumnar );
tSettings.m_iBufferStorage = hSearchd.GetSize ( "merge_buffer_storage", tSettings.m_iBufferStorage );
tSettings.m_iBufferFulltext = hSearchd.GetSize ( "merge_buffer_fulltext", tSettings.m_iBufferFulltext );
tSettings.m_iBufferDict = hSearchd.GetSize ( "merge_buffer_dict", tSettings.m_iBufferDict );
tSettings.m_iSIMemLimit = hSearchd.GetSize ( "merge_si_memlimit", tSettings.m_iSIMemLimit );
SetMergeSettings(tSettings);
}
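// An illustrative searchd-section snippet these options map from (the sizes
// here are hypothetical):
//   merge_buffer_attributes = 8M
//   merge_buffer_fulltext = 16M
//   merge_si_memlimit = 128M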
void ConfigureSearchd ( const CSphConfig & hConf, bool bOptPIDFile, bool bTestMode ) REQUIRES ( MainThread )
{
if ( !hConf.Exists ( "searchd" ) || !hConf["searchd"].Exists ( "searchd" ) )
sphFatal ( "'searchd' config section not found in '%s'", g_sConfigFile.cstr () );
const CSphConfigSection & hSearchd = hConf["searchd"]["searchd"];
sphCheckDuplicatePaths ( hConf );
if ( bOptPIDFile )
if ( !hSearchd ( "pid_file" ) )
sphFatal ( "mandatory option 'pid_file' not found in 'searchd' section" );
// read_timeout is now deprecated
g_iReadTimeoutS = hSearchd.GetSTimeS ( "read_timeout", 5);
// network_timeout overrides read_timeout
g_iReadTimeoutS = hSearchd.GetSTimeS ( "network_timeout", g_iReadTimeoutS );
g_iWriteTimeoutS = g_iReadTimeoutS;
g_bTimeoutEachPacket = hSearchd.GetBool( "reset_network_timeout_on_packet" );
g_iClientQlTimeoutS = hSearchd.GetSTimeS( "sphinxql_timeout", 900);
g_iClientTimeoutS = hSearchd.GetSTimeS ( "client_timeout", 300 );
g_iMaxConnection = hSearchd.GetInt ( "max_connections", g_iMaxConnection );
g_iThreads = hSearchd.GetInt ( "threads", GetNumLogicalCPUs() );
SetMaxChildrenThreads ( g_iThreads );
g_iThdQueueMax = hSearchd.GetInt ( "jobs_queue_size", g_iThdQueueMax );
g_iPersistentPoolSize = hSearchd.GetInt ("persistent_connections_limit");
// FIXME!!! remove deprecated preopen_indexes
if ( hSearchd.Exists ( "preopen_tables" ) )
MutableIndexSettings_c::GetDefaults().m_bPreopen = hSearchd.GetBool ( "preopen_tables" );
else
MutableIndexSettings_c::GetDefaults().m_bPreopen = hSearchd.GetBool ( "preopen_indexes" );
sphSetUnlinkOld ( hSearchd.GetBool ( "unlink_old" ) );
g_iExpansionLimit = hSearchd.GetInt ( "expansion_limit" );
if ( hSearchd.Exists ( "expansion_merge_threshold_docs" ) )
ExpandedMergeThdDocs ( hSearchd.GetInt ( "expansion_merge_threshold_docs" ) );
if ( hSearchd.Exists ( "expansion_merge_threshold_hits" ) )
ExpandedMergeThdHits ( hSearchd.GetInt ( "expansion_merge_threshold_hits" ) );
// initialize buffering settings
SetUnhintedBuffer ( hSearchd.GetSize( "read_unhinted", DEFAULT_READ_UNHINTED ) );
int iReadBuffer = hSearchd.GetSize ( "read_buffer", DEFAULT_READ_BUFFER );
FileAccessSettings_t & tDefaultFA = MutableIndexSettings_c::GetDefaults().m_tFileAccess;
tDefaultFA.m_iReadBufferDocList = hSearchd.GetSize ( "read_buffer_docs", iReadBuffer );
tDefaultFA.m_iReadBufferHitList = hSearchd.GetSize ( "read_buffer_hits", iReadBuffer );
tDefaultFA.m_eDoclist = GetFileAccess( hSearchd, "access_doclists", true, FileAccess_e::FILE );
tDefaultFA.m_eHitlist = GetFileAccess( hSearchd, "access_hitlists", true, FileAccess_e::FILE );
tDefaultFA.m_eDict = GetFileAccess( hSearchd, "access_dict", false, tDefaultFA.m_eDict );
tDefaultFA.m_eAttr = FileAccess_e::MMAP_PREREAD;
tDefaultFA.m_eBlob = FileAccess_e::MMAP_PREREAD;
tDefaultFA.m_eAttr = GetFileAccess( hSearchd, "access_plain_attrs", false, tDefaultFA.m_eAttr );
tDefaultFA.m_eBlob = GetFileAccess( hSearchd, "access_blob_attrs", false, tDefaultFA.m_eBlob );
if ( hSearchd("subtree_docs_cache") )
g_iMaxCachedDocs = hSearchd.GetSize ( "subtree_docs_cache", g_iMaxCachedDocs );
if ( hSearchd("subtree_hits_cache") )
g_iMaxCachedHits = hSearchd.GetSize ( "subtree_hits_cache", g_iMaxCachedHits );
if ( hSearchd("seamless_rotate") )
g_bSeamlessRotate = ( hSearchd["seamless_rotate"].intval()!=0 );
if ( hSearchd ( "grouping_in_utc" ) )
{
if ( IsTimeZoneSet() )
sphWarning ( "grouping_in_utc=1 conflicts with 'timezone'" );
SetGroupingInUTC ( hSearchd["grouping_in_utc"].intval ()!=0 );
}
if ( hSearchd ( "timezone" ) )
{
if ( GetGroupingInUTC() )
sphWarning ( "grouping_in_utc=1 conflicts with 'timezone'" );
CSphString sWarn;
SetTimeZone ( hSearchd["timezone"].cstr(), sWarn );
if ( !sWarn.IsEmpty() )
sphWarning ( "%s", sWarn.cstr() );
else
sphInfo ( "Using time zone '%s'", GetTimeZoneName().cstr() );
}
if ( hSearchd("join_cache_size") )
SetJoinCacheSize ( hSearchd.GetSize64 ( "join_cache_size", GetJoinCacheSize() ) );
// sha1 password hash for shutdown action
SetShutdownToken ( hSearchd.GetStr ( "shutdown_token" ) );
if ( !g_bSeamlessRotate && MutableIndexSettings_c::GetDefaults().m_bPreopen && !bTestMode )
sphWarning ( "preopen_indexes=1 has no effect with seamless_rotate=0" );
SetAttrFlushPeriod ( hSearchd.GetUsTime64S ( "attr_flush_period", 0 ));
g_iMaxPacketSize = hSearchd.GetSize ( "max_packet_size", g_iMaxPacketSize );
g_iMaxFilters = hSearchd.GetInt ( "max_filters", g_iMaxFilters );
g_iMaxFilterValues = hSearchd.GetInt ( "max_filter_values", g_iMaxFilterValues );
g_iMaxBatchQueries = hSearchd.GetInt ( "max_batch_queries", g_iMaxBatchQueries );
g_iDistThreads = hSearchd.GetInt ( "max_threads_per_query", g_iDistThreads );
sphSetThrottling ( hSearchd.GetInt ( "rt_merge_iops", 0 ), hSearchd.GetSize ( "rt_merge_maxiosize", 0 ) );
g_iPingIntervalUs = hSearchd.GetUsTime64Ms ( "ha_ping_interval", 1000000 );
g_uHAPeriodKarmaS = hSearchd.GetSTimeS ( "ha_period_karma", 60 );
g_iQueryLogMinMs = hSearchd.GetMsTimeMs ( "query_log_min_msec", g_iQueryLogMinMs );
g_iAgentConnectTimeoutMs = hSearchd.GetMsTimeMs ( "agent_connect_timeout", g_iAgentConnectTimeoutMs );
g_iAgentQueryTimeoutMs = hSearchd.GetMsTimeMs ( "agent_query_timeout", g_iAgentQueryTimeoutMs );
g_iAgentRetryDelayMs = hSearchd.GetMsTimeMs ( "agent_retry_delay", g_iAgentRetryDelayMs );
if ( g_iAgentRetryDelayMs > DAEMON_MAX_RETRY_DELAY )
sphWarning ( "agent_retry_delay %d exceeded max recommended %d", g_iAgentRetryDelayMs, DAEMON_MAX_RETRY_DELAY );
g_iAgentRetryCount = hSearchd.GetInt ( "agent_retry_count", g_iAgentRetryCount );
if ( g_iAgentRetryCount > DAEMON_MAX_RETRY_COUNT )
sphWarning ( "agent_retry_count %d exceeded max recommended %d", g_iAgentRetryCount, DAEMON_MAX_RETRY_COUNT );
g_iReplConnectTimeoutMs = hSearchd.GetMsTimeMs ( "replication_connect_timeout", g_iReplConnectTimeoutMs );
g_iReplQueryTimeoutMs = hSearchd.GetMsTimeMs ( "replication_query_timeout", g_iReplQueryTimeoutMs );
g_iReplRetryCount = hSearchd.GetInt ( "replication_retry_count", g_iReplRetryCount );
g_iReplRetryDelayMs = hSearchd.GetMsTimeMs ( "replication_retry_delay", g_iReplRetryDelayMs );
ReplicationSetTimeouts ( g_iReplConnectTimeoutMs, g_iReplQueryTimeoutMs, g_iReplRetryCount, g_iReplRetryDelayMs );
g_tmWaitUS = hSearchd.GetUsTime64Ms ( "net_wait_tm", g_tmWaitUS );
g_iThrottleAction = hSearchd.GetInt ( "net_throttle_action", g_iThrottleAction );
g_iThrottleAccept = hSearchd.GetInt ( "net_throttle_accept", g_iThrottleAccept );
g_iNetWorkers = hSearchd.GetInt ( "net_workers", g_iNetWorkers );
g_iNetWorkers = Max ( g_iNetWorkers, 1 );
CheckSystemTFO();
if ( g_iTFO!=TFO_ABSENT && hSearchd.GetInt ( "listen_tfo", 1 )==0 )
{
g_iTFO &= ~TFO_LISTEN;
}
bool bLocaleSet = false;
if ( hSearchd ( "collation_libc_locale" ) )
{
auto sLocale = hSearchd.GetStr ( "collation_libc_locale" );
bLocaleSet = setlocale ( LC_COLLATE, sLocale.cstr() );
if ( !bLocaleSet )
sphWarning ( "setlocale failed (locale='%s')", sLocale.cstr() );
}
CSphString sLoc = setlocale ( LC_COLLATE, nullptr );
SetLocale( sLoc, bLocaleSet );
if ( hSearchd ( "collation_server" ) )
{
CSphString sCollation = hSearchd.GetStr ( "collation_server" );
CSphString sError;
GlobalCollation () = sphCollationFromName ( sCollation, &sError );
if ( !sError.IsEmpty() )
sphWarning ( "%s", sError.cstr() );
}
if ( hSearchd("thread_stack") ) // fixme! rename? That is limit for stack of the coro, not of the thread!
{
constexpr int iThreadStackSizeMin = 128*1024;
int iStackSize = hSearchd.GetSize ( "thread_stack", iThreadStackSizeMin );
if ( iStackSize<iThreadStackSizeMin )
sphWarning ( "thread_stack %d less than default (128K), increased", iStackSize );
iStackSize = Max ( iStackSize, iThreadStackSizeMin );
Threads::SetMaxCoroStackSize ( iStackSize );
}
if ( hSearchd("predicted_time_costs") )
ParsePredictedTimeCosts ( hSearchd["predicted_time_costs"].cstr() );
if ( hSearchd("shutdown_timeout") )
g_iShutdownTimeoutUs = hSearchd.GetUsTime64S ( "shutdown_timeout", 60000000);
g_iDocstoreCache = hSearchd.GetSize64 ( "docstore_cache_size", 16777216 );
g_iSkipCache = hSearchd.GetSize64 ( "skiplist_cache_size", 67108864 );
if ( hSearchd.Exists ( "max_open_files" ) )
{
#if HAVE_GETRLIMIT && HAVE_SETRLIMIT
auto uLimit = ( rlim_t ) hSearchd["max_open_files"].intval ();
bool bMax = hSearchd["max_open_files"].strval ()=="max";
if ( !uLimit && !bMax )
sphWarning ( "max_open_files is %d, expected positive value; ignored", (int) uLimit );
else
{
struct rlimit dRlimit;
if ( 0!=getrlimit ( RLIMIT_NOFILE, &dRlimit ) )
sphWarning ( "Failed to getrlimit (RLIMIT_NOFILE), error %d: %s", errno, strerrorm ( errno ) );
else
{
auto uPrevLimit = dRlimit.rlim_cur;
if ( bMax )
uLimit = dRlimit.rlim_max;
dRlimit.rlim_cur = Min ( dRlimit.rlim_max, uLimit );
if ( 0!=setrlimit ( RLIMIT_NOFILE, &dRlimit ) )
sphWarning ( "Failed to setrlimit on %d, error %d: %s", (int)uLimit, errno, strerrorm ( errno ) );
else
sphInfo ( "Set max_open_files to %d (previous was %d), hardlimit is %d.",
(int)uLimit, (int)uPrevLimit, (int)dRlimit.rlim_max );
}
}
#else
sphWarning ("max_open_files defined, but this binary don't know about setrlimit() function");
#endif
}
QcacheStatus_t s = QcacheGetStatus();
s.m_iMaxBytes = hSearchd.GetSize64 ( "qcache_max_bytes", s.m_iMaxBytes );
s.m_iThreshMs = hSearchd.GetMsTimeMs ( "qcache_thresh_msec", s.m_iThreshMs );
s.m_iTtlS = hSearchd.GetSTimeS ( "qcache_ttl_sec", s.m_iTtlS );
QcacheSetup ( s.m_iMaxBytes, s.m_iThreshMs, s.m_iTtlS );
// hostname_lookup = {config_load | request}
g_bHostnameLookup = ( hSearchd.GetStr ( "hostname_lookup" ) == "request" );
CSphVariant * pLogMode = hSearchd ( "query_log_mode" );
if ( pLogMode && !pLogMode->strval().IsEmpty() )
{
errno = 0;
int iMode = strtol ( pLogMode->strval().cstr(), NULL, 8 );
int iErr = errno;
if ( iErr==ERANGE || iErr==EINVAL )
{
sphWarning ( "query_log_mode invalid value (value=%o, error=%s); skipped", iMode, strerrorm(iErr) );
} else
{
g_iLogFileMode = iMode;
}
}
if ( hSearchd ( "server_id" ) )
{
g_iServerID = hSearchd.GetInt ( "server_id", g_iServerID );
g_bServerID = true;
const int iServerMask = 0x7f;
if ( g_iServerID>iServerMask )
{
g_iServerID &= iServerMask;
sphWarning ( "server_id out of range 0 - 127, clamped to %d", g_iServerID );
}
}
g_sMySQLVersion = hSearchd.GetStr ( "mysql_version_string", g_sMySQLVersion.cstr() );
sphinxexpr::MySQLVersion() = g_sMySQLVersion;
AllowOnlyNot ( hSearchd.GetInt ( "not_terms_only_allowed", 0 )!=0 );
ConfigureDaemonLog ( hSearchd.GetStr ( "query_log_commands" ) );
g_iAutoOptimizeCutoffMultiplier = hSearchd.GetInt ( "auto_optimize", 1 );
MutableIndexSettings_c::GetDefaults().m_iOptimizeCutoff = hSearchd.GetInt ( "optimize_cutoff", AutoOptimizeCutoff() );
SetPseudoSharding ( hSearchd.GetInt ( "pseudo_sharding", 1 )!=0 );
SetOptionSI ( hSearchd, bTestMode );
CSphString sWarning;
AttrEngine_e eEngine = AttrEngine_e::DEFAULT;
if ( StrToAttrEngine ( eEngine, AttrEngine_e::ROWWISE, hSearchd.GetStr("engine"), sWarning ) )
SetDefaultAttrEngine(eEngine);
else
sphWarning ( "%s", sWarning.cstr() );
g_bHasBuddyPath = hSearchd.Exists ( "buddy_path" );
g_sBuddyPath = hSearchd.GetStr ( "buddy_path" );
g_bTelemetry = ( hSearchd.GetInt ( "telemetry", g_bTelemetry ? 1 : 0 )!=0 );
g_bAutoSchema = ( hSearchd.GetInt ( "auto_schema", g_bAutoSchema ? 1 : 0 )!=0 );
SetAccurateAggregationDefault ( hSearchd.GetInt ( "accurate_aggregation", GetAccurateAggregationDefault() )!=0 );
SetDistinctThreshDefault ( hSearchd.GetInt ( "distinct_precision_threshold", GetDistinctThreshDefault() ) );
ConfigureMerge(hSearchd);
}
static void DirMustWritable ( const CSphString & sDataDir )
{
CSphString sError;
CSphString sTmpName;
sTmpName.SetSprintf ( "%s/gmb_%d", sDataDir.cstr(), (int)getpid() );
CSphWriter tFile;
if ( !tFile.OpenFile ( sTmpName, sError ) )
sphFatal ( "The directory Manticore starts from must be writable for the daemon, error: %s", sError.cstr() );
tFile.PutDword( 1 );
tFile.Flush();
if ( tFile.IsError() )
sphFatal ( "The directory Manticore starts from must be writable for the daemon, error: %s", sError.cstr() );
}
static void CheckSetCwd () REQUIRES ( MainThread )
{
if ( g_bNoChangeCwd || !IsConfigless() )
return;
CSphString sDataDir = GetDataDirInt();
if ( !IsPathAbsolute ( sDataDir ) )
{
DirMustWritable ( "." );
return;
}
int iRes = chdir ( sDataDir.cstr() );
if ( iRes!=0 )
sphFatal ( "failed to change current working directory to '%s': %s", sDataDir.cstr(), strerror(errno) );
sphLogDebug ( "current working directory changed to '%s'", sDataDir.cstr() );
DirMustWritable ( sDataDir );
g_bCwdChanged = true;
}
static void PutPath ( const CSphString & sCwd, const CSphString & sVar, RowBuffer_i & tOut )
{
if ( !IsPathAbsolute ( sVar ) )
{
CSphString sPath;
sPath.SetSprintf ( "%s/%s", sCwd.cstr(), sVar.cstr() );
tOut.PutString ( sPath );
} else
{
tOut.PutString ( sVar );
}
}
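// E.g. PutPath ( "/var/lib/manticore", "searchd.log", tOut ) emits
// "/var/lib/manticore/searchd.log", while an absolute sVar is passed through
// as-is (paths here are illustrative).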
class StringSetStatic_c : public sph::StringSet
{
public:
StringSetStatic_c ( std::initializer_list<const char *> dArgs )
{
for ( const char * sName : dArgs )
Add ( sName );
}
};
static StringSetStatic_c g_hSearchdPathVars {
"binlog_path"
, "data_dir"
, "lemmatizer_base"
, "log"
, "pid_file"
, "plugin_dir"
, "query_log"
, "snippets_file_prefix"
, "sphinxql_state"
, "ssl_ca"
, "ssl_cert"
, "ssl_key"
};
static void DumpSettingsSection ( const CSphConfigSection & hNode, const char * sSectionName, RowBuffer_i & tOut )
{
StringBuilder_c tTmp;
for ( const auto & tIt : hNode )
{
const CSphVariant * pVal = &tIt.second;
// empty binlog_path should not have default value
if ( tIt.first=="binlog_path" && pVal->strval().IsEmpty() )
continue;
tTmp.Clear();
tTmp.Appendf ( "%s.%s", sSectionName, tIt.first.cstr() );
do
{
// data packets
tOut.PutString ( tTmp.cstr() );
if ( g_hSearchdPathVars[tIt.first] )
PutPath ( g_sConfigPath, pVal->strval(), tOut );
else
tOut.PutString ( pVal->strval() );
if ( !tOut.Commit() )
return;
pVal = pVal->m_pNext;
} while ( pVal );
}
}
static void DumpSettingsSection ( const CSphConfig & hConf, const char * sSectionName, RowBuffer_i & tOut )
{
if ( !hConf.Exists ( sSectionName ) || !hConf[sSectionName].Exists ( sSectionName ) )
return;
DumpSettingsSection ( hConf[sSectionName][sSectionName], sSectionName, tOut );
}
static void DumpCommonSection ( const CSphConfig & hConf, RowBuffer_i & tOut )
{
CSphString sCommonName ( "common" );
CSphString sPDirName ( "plugin_dir" );
if ( hConf.Exists ( sCommonName ) && hConf[sCommonName].Exists ( sCommonName ) && hConf[sCommonName][sCommonName].Exists ( sPDirName ) )
{
DumpSettingsSection ( hConf, sCommonName.cstr(), tOut );
return;
}
// plugin_dir should be printed always
CSphConfigSection hCommon;
if ( hConf.Exists ( sCommonName ) && hConf[sCommonName].Exists ( sCommonName ) )
hCommon = hConf[sCommonName][sCommonName];
hCommon.AddEntry ( sPDirName.cstr(), PluginGetDir().cstr() );
DumpSettingsSection ( hCommon, sCommonName.cstr(), tOut );
}
void HandleMysqlShowSettings ( const CSphConfig & hConf, RowBuffer_i & tOut )
{
tOut.HeadBegin ();
tOut.HeadColumn ( "Setting_name" );
tOut.HeadColumn ( "Value" );
tOut.HeadEnd ();
// configuration file path
tOut.PutString ( "configuration_file" );
PutPath ( g_sConfigPath, g_sConfigFile, tOut );
tOut.Commit();
// pid
tOut.PutString ( "worker_pid" );
tOut.PutNumAsString ( (int)getpid() );
tOut.Commit();
DumpSettingsSection ( hConf, "searchd", tOut );
DumpCommonSection ( hConf, tOut );
DumpSettingsSection ( hConf, "indexer", tOut );
// done
tOut.Eof();
}
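// Illustrative 'SHOW SETTINGS' rows this handler produces (values hypothetical):
//   configuration_file | /etc/manticoresearch/manticore.conf
//   worker_pid         | 12345
//   searchd.listen     | 9306:mysql41
//   common.plugin_dir  | /usr/local/lib/manticore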
// load an index which is not yet loaded, and publish it in the served indexes.
// ServiceMain -> ConfigureAndPreloadOnStartup -> ConfigureAndPreloadIndex
// ServiceMain -> ConfigureAndPreloadOnStartup -> ConfigureAndPreloadConfiglessIndexes -> ConfiglessPreloadIndex -> ConfigureAndPreloadIndex
// from any another thread:
// ClientSession_c::Execute -> HandleMysqlImportTable -> AddExistingIndexConfigless -> ConfiglessPreloadIndex -> ConfigureAndPreloadIndex
ESphAddIndex ConfigureAndPreloadIndex ( const CSphConfigSection & hIndex, const char * szIndexName, StrVec_t& dWarnings, CSphString& sError )
{
auto [eAdd, pJustLoadedLocal] = AddIndex ( szIndexName, hIndex, true, false, nullptr, sError );
// local plain, rt, percolate added, but need to be at least preallocated before they could work.
switch ( eAdd )
{
case ADD_NEEDLOAD:
{
assert ( pJustLoadedLocal );
fprintf ( stdout, "precaching table '%s'\n", szIndexName );
fflush ( stdout );
IndexFiles_c dJustAddedFiles ( pJustLoadedLocal->m_sIndexPath );
if ( dJustAddedFiles.HasAllFiles ( ".new" ) )
{
WIdx_c WFake { pJustLoadedLocal }; // as RotateIndexGreedy wants w-locked
if ( RotateIndexGreedy ( *pJustLoadedLocal, szIndexName, sError ) )
{
if ( !FixupAndLockIndex ( *pJustLoadedLocal, UnlockedHazardIdxFromServed ( *pJustLoadedLocal ), &hIndex, szIndexName, dWarnings, sError ) )
return ADD_ERROR;
} else
{
dWarnings.Add ( sError );
if ( !PreallocNewIndex ( *pJustLoadedLocal, &hIndex, szIndexName, dWarnings, sError ) )
return ADD_ERROR;
}
} else
{
if ( !PreallocNewIndex ( *pJustLoadedLocal, &hIndex, szIndexName, dWarnings, sError ) )
return ADD_ERROR;
// index could load global_idf from the settings
// need to pass and load global idf below
CSphIndex * pIdx = UnlockedHazardIdxFromServed ( *pJustLoadedLocal );
if ( pIdx->GetMutableSettings().IsSet ( MutableName_e::GLOBAL_IDF ) )
pJustLoadedLocal->m_sGlobalIDFPath = pIdx->GetMutableSettings().m_sGlobalIDFPath;
}
}
// no break
case ADD_SERVED:
{
// finally add the index to the hash of enabled.
g_pLocalIndexes->Add ( pJustLoadedLocal, szIndexName );
if ( !pJustLoadedLocal->m_sGlobalIDFPath.IsEmpty() && !sph::PrereadGlobalIDF ( pJustLoadedLocal->m_sGlobalIDFPath, sError ) )
dWarnings.Add ( "global IDF unavailable - IGNORING" );
}
// no sense to break
case ADD_DISTR:
case ADD_ERROR:
default:
break;
}
return eAdd;
}
// invoked once on start from ServiceMain (actually it creates the hashes)
// ServiceMain -> ConfigureAndPreloadOnStartup
static void ConfigureAndPreloadOnStartup ( const CSphConfig & hConf, const StrVec_t & dOptIndexes ) REQUIRES (MainThread)
{
int iCounter = 0;
int iValidIndexes = 0;
int64_t tmLoad = -sphMicroTimer();
if ( hConf.Exists ( "index" ) )
{
assert ( !IsConfigless() );
for ( const auto& tIndex : hConf["index"] )
{
const CSphConfigSection & hIndex = tIndex.second;
const char * szIndexName = tIndex.first.cstr();
if ( !dOptIndexes.IsEmpty() && !dOptIndexes.any_of ( [&] ( const CSphString &rhs ) { return rhs.EqN ( szIndexName ); } ) )
continue;
StrVec_t dWarnings;
CSphString sError;
ESphAddIndex eAdd = ConfigureAndPreloadIndex ( hIndex, szIndexName, dWarnings, sError );
for ( const auto & i : dWarnings )
sphWarning ( "table '%s': %s", szIndexName, i.cstr() );
if ( eAdd==ADD_ERROR )
sphWarning ( "table '%s': %s - NOT SERVING", szIndexName, sError.cstr() );
iValidIndexes += ( eAdd!=ADD_ERROR ? 1 : 0 );
iCounter += ( eAdd== ADD_NEEDLOAD ? 1 : 0 );
}
} else {
assert ( IsConfigless() );
ConfigureAndPreloadConfiglessIndexes ( iValidIndexes, iCounter );
}
InitPersistentPool();
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto& tIt : *hLocal )
{
auto pServed = tIt.second;
if ( pServed )
{
CSphString sWarning, sError;
RIdx_c pIdx { pServed };
if ( !ApplyIndexKillList ( pIdx, sWarning, sError, true ) )
sphWarning ( "table '%s': error applying killlist: %s", pIdx->GetName(), sError.cstr() );
if ( sWarning.Length() )
sphWarning ( "%s", sWarning.cstr() );
}
}
// set index cluster name for check
for ( const ClusterDesc_t & tClusterDesc : GetClustersInt() )
for ( const auto & tIndex : tClusterDesc.m_hIndexes )
AssignClusterToIndex ( tIndex.first, tClusterDesc.m_sName );
sphLogDebugRpl ( "%d clusters loaded from config", GetClustersInt().GetLength() );
tmLoad += sphMicroTimer();
if ( !iValidIndexes )
sphLogDebug ( "no valid tables to serve" );
else
fprintf ( stdout, "precached %d tables in %0.3f sec\n", iCounter, float(tmLoad)/1000000 );
}
// if data_dir changed the cwd, then paths in the 'searchd' and 'common' sections should be fixed from relative to absolute
static void FixPathAbsolute ( CSphString & sPath )
{
if ( !g_bCwdChanged )
return;
if ( sPath.IsEmpty() || IsPathAbsolute ( sPath ) )
return;
assert ( !g_sExePath.IsEmpty() );
CSphString sFullPath;
sFullPath.SetSprintf ( "%s/%s", g_sExePath.cstr(), sPath.cstr() );
sPath = sphNormalizePath ( sFullPath );
}
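// E.g. if the daemon was started from /home/user and the cwd then moved to
// data_dir, a relative "searchd.log" becomes "/home/user/searchd.log"
// (illustrative paths; g_sExePath holds the startup cwd, captured in
// ServiceMain() before CheckSetCwd() runs).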
void OpenDaemonLog ( const CSphConfigSection & hSearchd, bool bCloseIfOpened=false )
{
CSphString sLog = "searchd.log";
if ( hSearchd.Exists ( "log" ) )
{
if ( hSearchd["log"]=="syslog" )
{
#if !USE_SYSLOG
if ( g_iLogFile<0 )
{
g_iLogFile = STDOUT_FILENO;
sphWarning ( "failed to use syslog for logging. You have to reconfigure --with-syslog and rebuild the daemon!" );
sphInfo ( "will use default file 'searchd.log' for logging." );
}
#else
g_bLogSyslog = true;
#endif
} else
sLog = hSearchd["log"].cstr();
}
umask ( 066 );
if ( bCloseIfOpened && g_iLogFile!=STDOUT_FILENO )
{
close ( g_iLogFile );
g_iLogFile = STDOUT_FILENO;
}
if ( !g_bLogSyslog )
{
FixPathAbsolute ( sLog );
g_iLogFile = open ( sLog.cstr(), O_CREAT | O_RDWR | O_APPEND, S_IREAD | S_IWRITE );
if ( g_iLogFile<0 )
{
g_iLogFile = STDOUT_FILENO;
sphFatal ( "failed to open log file '%s': %s", sLog.cstr(), strerrorm(errno) );
}
LogChangeMode ( g_iLogFile, g_iLogFileMode );
}
g_sLogFile = sLog;
g_bLogTty = isatty ( g_iLogFile )!=0;
}
static void SetUidShort ( bool bTestMode )
{
int iServerId = g_iServerID;
// need constant seed across all environments for tests
if ( bTestMode )
return UidShortSetup ( iServerId, 100000 );
const int iServerMask = 0x7f;
uint64_t uStartedSec = 0;
// server id as high part of counter
if ( !iServerId )
{
CSphString sMAC = GetMacAddress();
sphLogDebug ( "MAC address %s for uuid-short server_id", sMAC.cstr() );
if ( sMAC.IsEmpty() )
{
DWORD uSeed = sphRand();
sMAC.SetSprintf ( "%u", uSeed );
sphWarning ( "failed to get MAC address, using random number %s", sMAC.cstr() );
}
// fold MAC into 1 byte
iServerId = Pearson8 ( (const BYTE *)sMAC.cstr(), sMAC.Length() );
iServerId &= iServerMask;
}
// start time Unix timestamp as middle part of counter
uStartedSec = sphMicroTimer() / 1000000;
// base timestamp is 01 May of 2019
const uint64_t uBaseSec = 1556668800;
if ( uStartedSec>uBaseSec )
uStartedSec -= uBaseSec;
UidShortSetup ( iServerId, (int)uStartedSec );
}
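// The resulting UUID-short counter is seeded roughly as: a 7-bit server id
// (from config, or a Pearson hash of the MAC address) as the high part, plus
// seconds elapsed since 2019-05-01 as the start-time part; the exact bit
// layout lives in UidShortSetup().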
namespace { // static
// implement '--stop' and '--stopwait' (connect and stop another instance by pid file from config)
void StopOrStopWaitAnother ( CSphVariant * v, bool bWait ) REQUIRES ( MainThread )
{
if ( !v )
sphFatal ( "stop: option 'pid_file' not found in '%s' section 'searchd'", g_sConfigFile.cstr () );
CSphString sPidFile = v->cstr();
FixPathAbsolute ( sPidFile );
FILE * fp = fopen ( sPidFile.cstr(), "r" );
if ( !fp )
sphFatal ( "stop: pid file '%s' does not exist or is not readable", sPidFile.cstr() );
char sBuf[16];
int iLen = (int) fread ( sBuf, 1, sizeof(sBuf)-1, fp );
sBuf[iLen] = '\0';
fclose ( fp );
int iPid = atoi(sBuf);
if ( iPid<=0 )
sphFatal ( "stop: failed to read valid pid from '%s'", sPidFile.cstr() );
int iWaitTimeout = g_iShutdownTimeoutUs + 100000;
#if _WIN32
bool bTerminatedOk = false;
char szPipeName[64];
snprintf ( szPipeName, sizeof(szPipeName), "\\\\.\\pipe\\searchd_%d", iPid );
HANDLE hPipe = INVALID_HANDLE_VALUE;
while ( hPipe==INVALID_HANDLE_VALUE )
{
hPipe = CreateFile ( szPipeName, GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL );
if ( hPipe==INVALID_HANDLE_VALUE )
{
if ( GetLastError()!=ERROR_PIPE_BUSY )
{
fprintf ( stdout, "WARNING: could not open pipe (GetLastError()=%d)\n", GetLastError () );
break;
}
if ( !WaitNamedPipe ( szPipeName, iWaitTimeout/1000 ) )
{
fprintf ( stdout, "WARNING: could not open pipe (GetLastError()=%d)\n", GetLastError () );
break;
}
}
}
if ( hPipe!=INVALID_HANDLE_VALUE )
{
DWORD uWritten = 0;
BYTE uWrite = 1;
BOOL bResult = WriteFile ( hPipe, &uWrite, 1, &uWritten, NULL );
if ( !bResult )
fprintf ( stdout, "WARNING: failed to send SIGHTERM to searchd (pid=%d, GetLastError()=%d)\n", iPid, GetLastError () );
bTerminatedOk = !!bResult;
CloseHandle ( hPipe );
}
if ( bTerminatedOk )
{
sphInfo ( "stop: successfully terminated pid %d", iPid );
exit ( 0 );
} else
sphFatal ( "stop: error terminating pid %d", iPid );
#else
CSphString sPipeName;
int iPipeCreated = -1;
int fdPipe = -1;
if ( bWait )
{
sPipeName = GetNamedPipeName ( iPid );
::unlink ( sPipeName.cstr () ); // avoid stale garbage polluting us
int iMask = umask ( 0 );
iPipeCreated = mkfifo ( sPipeName.cstr(), 0666 );
umask ( iMask );
if ( iPipeCreated!=-1 )
fdPipe = ::open ( sPipeName.cstr(), O_RDONLY | O_NONBLOCK );
if ( iPipeCreated==-1 )
sphWarning ( "mkfifo failed (path=%s, err=%d, msg=%s); will NOT wait", sPipeName.cstr(), errno, strerrorm(errno) );
else if ( fdPipe<0 )
sphWarning ( "open failed (path=%s, err=%d, msg=%s); will NOT wait", sPipeName.cstr(), errno, strerrorm(errno) );
}
if ( kill ( iPid, SIGTERM ) )
sphFatal ( "stop: kill() on pid %d failed: %s", iPid, strerrorm(errno) );
else
sphInfo ( "stop: successfully sent SIGTERM to pid %d", iPid );
int iExitCode = ( bWait && ( iPipeCreated==-1 || fdPipe<0 ) ) ? 1 : 0;
bool bHandshake = true;
if ( bWait && fdPipe>=0 )
while ( true )
{
int iReady = sphPoll ( fdPipe, iWaitTimeout );
// error on wait
if ( iReady<0 )
{
iExitCode = 3;
sphWarning ( "stopwait%s error '%s'", ( bHandshake ? " handshake" : " " ), strerrorm(errno) );
break;
}
// timeout
if ( iReady==0 )
{
if ( !bHandshake )
continue;
iExitCode = 1;
break;
}
// reading data
DWORD uStatus = 0;
int iRead = ::read ( fdPipe, &uStatus, sizeof(DWORD) );
if ( iRead!=sizeof(DWORD) )
{
sphWarning ( "stopwait read fifo error '%s'", strerrorm(errno) );
iExitCode = 3; // stopped daemon crashed during stop
break;
} else
{
iExitCode = ( uStatus==1 ? 0 : 2 ); // uStatus == 1 - AttributeSave - ok, other values - error
}
if ( !bHandshake )
break;
bHandshake = false;
}
::unlink ( sPipeName.cstr () ); // ok on linux once it has been opened.
if ( fdPipe>=0 )
::close ( fdPipe );
exit ( iExitCode );
#endif
}
} // static namespace
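// Exit codes of the POSIX '--stopwait' branch above: 0 - stopped cleanly,
// 1 - fifo setup failed or the handshake timed out, 2 - the daemon reported an
// unsuccessful attribute save, 3 - poll/read error (daemon likely crashed mid-stop).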
static void InitBanner()
{
const char * szColumnarVer = GetColumnarVersionStr();
CSphString sColumnar = "";
if ( szColumnarVer )
sColumnar.SetSprintf ( " (columnar %s)", szColumnarVer );
const char * szSiVer = GetSecondaryVersionStr();
CSphString sSi = "";
if ( szSiVer )
sSi.SetSprintf ( " (secondary %s)", szSiVer );
const char * szKNNVer = GetKNNVersionStr();
CSphString sKNN = "";
if ( szKNNVer )
sKNN.SetSprintf ( " (knn %s)", szKNNVer );
g_sBannerVersion.SetSprintf ( "%s%s%s%s", szMANTICORE_NAME, sColumnar.cstr(), sSi.cstr(), sKNN.cstr() );
g_sBanner.SetSprintf ( "%s%s", g_sBannerVersion.cstr(), szMANTICORE_BANNER_TEXT );
g_sMySQLVersion.SetSprintf ( "%s%s%s%s", szMANTICORE_VERSION, sColumnar.cstr(), sSi.cstr(), sKNN.cstr() );
g_sStatusVersion.SetSprintf ( "%s%s%s%s", szMANTICORE_VERSION, sColumnar.cstr(), sSi.cstr(), sKNN.cstr() );
}
static void CheckSSL()
{
// check for SSL inited well
for ( const auto & tListener : g_dListeners )
{
CSphString sError;
if ( tListener.m_eProto==Proto_e::HTTPS )
{
if ( !CheckWeCanUseSSL ( &sError ) )
sphWarning ( "SSL init error: %s", sError.cstr() );
break;
}
}
}
static void CacheCPUInfo()
{
// these funcs do caching inside
GetNumLogicalCPUs();
GetNumPhysicalCPUs();
}
static void LogTimeZoneStartup ( const CSphString & sWarning )
{
// avoid writing this to stdout
bool bLogStdout = g_bLogStdout;
g_bLogStdout = false;
if ( !sWarning.IsEmpty() )
sphWarning ( "Error initializing time zones: %s", sWarning.cstr() );
sphInfo ( "Using local time zone '%s'", GetLocalTimeZoneName().cstr() );
g_bLogStdout = bLogStdout;
}
#ifndef LOCALDATADIR
#define LOCALDATADIR "."
#endif
int WINAPI ServiceMain ( int argc, char **argv ) EXCLUDES (MainThread)
{
ScopedRole_c thMain (MainThread);
g_bLogTty = isatty ( g_iLogFile )!=0;
#ifdef USE_VTUNE
__itt_pause ();
#endif // USE_VTUNE
g_tmStarted = sphMicroTimer();
#if _WIN32
CSphVector<char *> dArgs;
if ( g_bService )
{
g_ssHandle = RegisterServiceCtrlHandler ( g_sServiceName, ServiceControl );
if ( !g_ssHandle )
sphFatal ( "failed to start service: RegisterServiceCtrlHandler() failed: %s", WinErrorInfo() );
g_ss.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
MySetServiceStatus ( SERVICE_START_PENDING, NO_ERROR, 4000 );
if ( argc<=1 )
{
dArgs.Resize ( g_dArgs.GetLength() );
ARRAY_FOREACH ( i, g_dArgs )
dArgs[i] = (char*) g_dArgs[i].cstr();
argc = g_dArgs.GetLength();
argv = &dArgs[0];
}
}
char szPipeName[64];
snprintf ( szPipeName, sizeof(szPipeName), "\\\\.\\pipe\\searchd_%d", getpid() );
g_hPipe = CreateNamedPipe ( szPipeName, PIPE_ACCESS_INBOUND,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_NOWAIT,
PIPE_UNLIMITED_INSTANCES, 0, WIN32_PIPE_BUFSIZE, NMPWAIT_NOWAIT, NULL );
ConnectNamedPipe ( g_hPipe, NULL );
#endif
tzset();
Tracer::Init();
#if _WIN32
CheckWinInstall();
#endif
CSphString sError, sKNNError;
// initialize it before other code to fetch version string for banner
bool bColumnarError = !InitColumnar ( sError );
bool bSecondaryError = !InitSecondary ( g_sSecondaryError );
bool bKNNError = !InitKNN ( sKNNError );
sphCollationInit ();
InitBanner();
if ( !g_bService )
fprintf ( stdout, "%s", g_sBanner.cstr() );
if ( bColumnarError )
sphWarning ( "Error initializing columnar storage: %s", sError.cstr() );
if ( bSecondaryError )
sphWarning ( "Error initializing secondary index: %s", g_sSecondaryError.cstr() );
if ( bKNNError )
sphWarning ( "Error initializing knn index: %s", sKNNError.cstr() );
if ( !sError.IsEmpty() )
sError = "";
CSphString sTZWarning;
{
StrVec_t dWarnings;
InitTimeZones ( dWarnings );
sTZWarning = ConcatWarnings(dWarnings);
}
//////////////////////
// parse command line
//////////////////////
CSphConfig conf;
bool bOptStop = false;
bool bOptStopWait = false;
bool bOptStatus = false;
bool bOptPIDFile = false;
StrVec_t dOptIndexes; // indexes explicitly pointed in cmdline options
int iOptPort = 0;
bool bOptPort = false;
CSphString sOptListen;
bool bOptListen = false;
bool bTestMode = false;
bool bOptDebugQlog = true;
bool bForcedPreread = false;
bool bNewCluster = false;
bool bNewClusterForce = false;
bool bForcePseudoSharding = false;
const char* szCmdConfigFile = nullptr;
DWORD uReplayFlags = 0;
#define OPT(_a1,_a2) else if ( !strcmp(argv[i],_a1) || !strcmp(argv[i],_a2) )
#define OPT1(_a1) else if ( !strcmp(argv[i],_a1) )
int i;
for ( i=1; i<argc; i++ )
{
// handle non-options
if ( argv[i][0]!='-' ) break;
// handle no-arg options
OPT ( "-h", "--help" ) { ShowHelp(); return 0; }
OPT ( "-?", "--?" ) { ShowHelp(); return 0; }
OPT ( "-v", "--version" ) { return 0; }
OPT1 ( "--console" ) { g_bOptNoLock = true; g_bOptNoDetach = true; bTestMode = true; }
OPT1 ( "--stop" ) bOptStop = true;
OPT1 ( "--stopwait" ) { bOptStop = true; bOptStopWait = true; }
OPT1 ( "--status" ) bOptStatus = true;
OPT1 ( "--pidfile" ) bOptPIDFile = true;
OPT1 ( "--iostats" ) g_bIOStats = true;
OPT1 ( "--cpustats" ) g_bCpuStats = true;
#if _WIN32
OPT1 ( "--install" ) { if ( !g_bService ) { ServiceInstall ( argc, argv ); return 0; } }
OPT1 ( "--delete" ) { if ( !g_bService ) { ServiceDelete (); return 0; } }
OPT1 ( "--ntservice" ) {} // it's valid but handled elsewhere
#else
OPT1 ( "--nodetach" ) g_bOptNoDetach = true;
#endif
OPT1 ( "--logdebug" ) g_eLogLevel = Max ( g_eLogLevel, SPH_LOG_DEBUG );
OPT1 ( "--logdebugv" ) g_eLogLevel = Max ( g_eLogLevel, SPH_LOG_VERBOSE_DEBUG );
OPT1 ( "--logdebugvv" ) g_eLogLevel = Max ( g_eLogLevel, SPH_LOG_VERY_VERBOSE_DEBUG );
OPT1 ( "--logreplication" ) g_eLogLevel = Max ( g_eLogLevel, SPH_LOG_RPL_DEBUG );
OPT1 ( "--safetrace" ) g_bSafeTrace = true;
OPT1 ( "--test" ) { g_bWatchdog = false; bTestMode = true; } // internal option, do NOT document
OPT1 ( "--test-thd-pool" ) { g_bWatchdog = false; bTestMode = true; } // internal option, do NOT document
OPT1 ( "--force-pseudo-sharding" ) { bForcePseudoSharding = true; } // internal option, do NOT document
OPT1 ( "--strip-path" ) g_bStripPath = true;
OPT1 ( "--vtune" ) g_bVtune = true;
OPT1 ( "--noqlog" ) bOptDebugQlog = false;
OPT1 ( "--force-preread" ) bForcedPreread = true;
OPT1 ( "--coredump" ) g_bCoreDump = true;
OPT1 ( "--new-cluster" ) bNewCluster = true;
OPT1 ( "--new-cluster-force" ) bNewClusterForce = true;
OPT1 ( "--no_change_cwd" ) g_bNoChangeCwd = true;
// FIXME! add opt=(csv)val handling here
OPT1 ( "--replay-flags=accept-desc-timestamp" ) uReplayFlags |= Binlog::REPLAY_ACCEPT_DESC_TIMESTAMP;
OPT1 ( "--replay-flags=ignore-open-errors" ) uReplayFlags |= Binlog::REPLAY_IGNORE_OPEN_ERROR;
OPT1 ( "--replay-flags=ignore-trx-errors" ) uReplayFlags |= Binlog::REPLAY_IGNORE_TRX_ERROR;
OPT1 ( "--replay-flags=ignore-all-errors" ) uReplayFlags |= Binlog::REPLAY_IGNORE_ALL_ERRORS;
// handle 1-arg options
else if ( (i+1)>=argc ) break;
OPT ( "-c", "--config" ) szCmdConfigFile = argv[++i];
OPT ( "-p", "--port" ) { bOptPort = true; iOptPort = atoi ( argv[++i] ); }
OPT ( "-l", "--listen" ) { bOptListen = true; sOptListen = argv[++i]; }
OPT ( "-i", "--index" ) dOptIndexes.Add ( argv[++i] ); // FIXME!!! remove depricated cli option
OPT ( "-t", "--table" ) dOptIndexes.Add ( argv[++i] );
#if _WIN32
OPT1 ( "--servicename" ) ++i; // it's valid but handled elsewhere
#endif
// handle unknown options
else
break;
}
if ( i!=argc )
sphFatal ( "malformed or unknown option near '%s'; use '-h' or '--help' to see available options.", argv[i] );
SetupLemmatizerBase();
g_sConfigFile = sphGetConfigFile ( szCmdConfigFile );
#if _WIN32
// init WSA on Windows
// we need to do it this early because otherwise gethostbyname() from config parser could fail
WSADATA tWSAData;
int iStartupErr = WSAStartup ( WINSOCK_VERSION, &tWSAData );
if ( iStartupErr )
sphFatal ( "failed to initialize WinSock2: %s", sphSockError ( iStartupErr ) );
if ( !LoadExFunctions () )
sphFatal ( "failed to initialize extended socket functions: %s", sphSockError ( iStartupErr ) );
// i want my windows sessions to log onto stdout
// both in Debug and Release builds
if ( !g_bService )
g_bOptNoDetach = true;
#ifndef NDEBUG
// i also want my windows debug builds to skip locking by default
// NOTE, this also skips log files!
g_bOptNoLock = true;
#endif
#endif
if ( !bOptPIDFile )
bOptPIDFile = !g_bOptNoLock;
// check port and listen arguments early
if ( !g_bOptNoDetach && ( bOptPort || bOptListen ) )
{
sphWarning ( "--listen and --port are only allowed in --console debug mode; switch ignored" );
bOptPort = bOptListen = false;
}
if ( bOptPort )
{
if ( bOptListen )
sphFatal ( "please specify either --port or --listen, not both" );
CheckPort ( iOptPort );
}
/////////////////////
// parse config file
/////////////////////
auto dConfig = FetchAndCheckIfChanged ( g_sConfigFile ).second;
sphInfo( "using config file '%s' (%d chars)...", g_sConfigFile.cstr(), dConfig.GetLength());
// do parse
// don't acquire wlock, since we're in the single main thread here.
FakeScopedWLock_T<> wFakeLock { g_tRotateConfigMutex };
if ( !ParseConfig ( &g_hCfg, g_sConfigFile, dConfig ) )
sphFatal ( "failed to parse config file '%s': %s", g_sConfigFile.cstr (), TlsMsg::szError() );
dConfig.Reset(); // make valgrind happy (that is not a leak, but produce 'still reachable' message)
const CSphConfig& hConf = g_hCfg;
if ( !hConf.Exists ( "searchd" ) || !hConf["searchd"].Exists ( "searchd" ) )
sphFatal ( "'searchd' config section not found in '%s'", g_sConfigFile.cstr () );
const CSphConfigSection & hSearchdpre = hConf["searchd"]["searchd"];
if ( !sphInitCharsetAliasTable ( sError ) )
sphFatal ( "failed to init charset alias table: %s", sError.cstr() );
////////////////////////
// stop running searchd
////////////////////////
if ( bOptStop )
{
StopOrStopWaitAnother ( hSearchdpre ( "pid_file" ), bOptStopWait );
assert ( 0 && "StopOrStopWaitAnother should not return " );
exit ( 0 );
}
////////////////////////////////
// query running searchd status
////////////////////////////////
if ( bOptStatus )
{
QueryStatus ( hSearchdpre("listen") );
exit ( 0 );
}
/////////////////////
// configure searchd
/////////////////////
sphInitCJson();
if ( !LoadConfigInt ( hConf, g_sConfigFile, sError ) )
sphFatal ( "%s", sError.cstr() );
ConfigureSearchd ( hConf, bOptPIDFile, bTestMode );
g_sExePath = sphGetCwd();
CheckSetCwd();
g_sConfigPath = sphGetCwd();
sphConfigureCommon ( hConf, FixPathAbsolute ); // this also inits plugins now
g_bWatchdog = hSearchdpre.GetInt ( "watchdog", g_bWatchdog )!=0;
if ( g_iMaxPacketSize<128*1024 || g_iMaxPacketSize>128*1024*1024 )
sphFatal ( "max_packet_size out of bounds (128K..128M)" );
if ( g_iMaxFilters<1 || g_iMaxFilters>10240 )
sphFatal ( "max_filters out of bounds (1..10240)" );
if ( g_iMaxFilterValues<1 || g_iMaxFilterValues>10485760 )
sphFatal ( "max_filter_values out of bounds (1..10485760)" );
bool bVisualLoad = true;
bool bWatched = false;
#if !_WIN32
// Let us start the watchdog right now, in the foreground first.
int iDevNull = open ( "/dev/null", O_RDWR );
if ( g_bWatchdog && !g_bOptNoDetach )
{
bWatched = true;
if ( !g_bOptNoLock )
OpenDaemonLog ( hConf["searchd"]["searchd"] );
bVisualLoad = SetWatchDog ( iDevNull );
OpenDaemonLog ( hConf["searchd"]["searchd"], true ); // just the 'IT Happens' magic - switch off, then on.
}
#endif
// here we are either in plain startup, or have been resurrected (forked) by the watchdog.
// create the pid
if ( bOptPIDFile )
{
g_sPidFile = hSearchdpre["pid_file"].cstr();
FixPathAbsolute ( g_sPidFile );
g_iPidFD = ::open ( g_sPidFile.scstr(), O_CREAT | O_WRONLY, S_IREAD | S_IWRITE );
if ( g_iPidFD<0 )
sphFatal ( "failed to create pid file '%s': %s", g_sPidFile.scstr(), strerrorm(errno) );
}
if ( bOptPIDFile && !sphLockEx ( g_iPidFD, false ) )
sphFatal ( "failed to lock pid file '%s': %s (searchd already running?)", g_sPidFile.scstr(), strerrorm(errno) );
g_bPidIsMine = true;
// Actions on resurrection
if ( bWatched && !bVisualLoad )
{
if ( !LoadConfigInt ( hConf, g_sConfigFile, sError ) )
sphFatal ( "%s", sError.cstr() );
auto [bChanged, dNewConfig] = FetchAndCheckIfChanged ( g_sConfigFile );
if ( bChanged )
{
// reparse the config file
sphInfo ( "Reloading the config (%d chars)", dNewConfig.GetLength() );
// fake lock is acquired; no warnings will be fired
if ( !ParseConfig ( &g_hCfg, g_sConfigFile, dNewConfig ) )
sphFatal ( "failed to parse config file '%s': %s", g_sConfigFile.cstr (), TlsMsg::szError() );
if ( !LoadConfigInt ( hConf, g_sConfigFile, sError ) )
sphFatal ( "%s", sError.cstr() );
sphInfo ( "Reconfigure the daemon" );
ConfigureSearchd ( hConf, bOptPIDFile, bTestMode );
}
}
// hSearchdpre might be dead if we reloaded the config.
CSphConfigSection & hSearchd = hConf["searchd"]["searchd"];
// handle my signals
SetSignalHandlers ( g_bOptNoDetach );
// create logs
//if ( !g_bOptNoLock )
{
// create log
OpenDaemonLog ( hSearchd, true );
// create query log if required
if ( hSearchd.Exists ( "query_log" ) )
{
CSphString sQueryLog = hSearchd["query_log"].cstr();
if ( sQueryLog=="syslog" )
g_bQuerySyslog = true;
else
{
FixPathAbsolute ( sQueryLog );
g_iQueryLogFile = open ( sQueryLog.cstr(), O_CREAT | O_RDWR | O_APPEND, S_IREAD | S_IWRITE );
if ( g_iQueryLogFile<0 )
sphFatal ( "failed to open query log file '%s': %s", sQueryLog.cstr(), strerrorm(errno) );
LogChangeMode ( g_iQueryLogFile, g_iLogFileMode );
}
g_sQueryLogFile = sQueryLog.cstr();
}
}
#if !_WIN32
if ( !g_bOptNoDetach && !bWatched )
{
switch ( fork () )
{
case -1:
// error
sphFatalLog ( "fork() failed (reason: %s)", strerrorm ( errno ) );
exit ( 1 );
case 0:
// daemonized child
break;
default:
// tty-controlled parent
exit ( 0 );
}
}
#endif
LogTimeZoneStartup(sTZWarning);
// init before workpool, as last checks binlog
ModifyDaemonPaths ( hSearchd, FixPathAbsolute );
sphRTInit ( hSearchd.GetStr ( "binlog_path", bTestMode ? "" : LOCALDATADIR ),
hSearchd.GetBool ( "binlog_common", val_from_env ( "MANTICORE_BINLOG_COMMON", false ) ),
hConf("common") ? hConf["common"]("common") : nullptr );
// after the next line executes we're in an MT environment, and need to take the rwlock when accessing the config.
StartGlobalWorkPool ();
// since that moment any 'fatal' will assume calling 'shutdown' function.
sphSetDieCallback ( DieOrFatalWithShutdownCb );
sphInfo( "starting daemon version '%s' ...", g_sStatusVersion.cstr() );
////////////////////
// network startup
////////////////////
CSphVector<ListenerDesc_t> dListenerDescs;
// command line arguments override config (but only in --console)
if ( bOptListen )
{
auto tDesc = ParseListener ( sOptListen.cstr() );
dListenerDescs.Add ( tDesc );
AddGlobalListener ( tDesc );
} else if ( bOptPort )
{
AddGlobalListener ( MakeAnyListener ( iOptPort ) );
} else
{
// listen directives in configuration file
for ( CSphVariant * v = hSearchd("listen"); v; v = v->m_pNext )
{
auto tDesc = ParseListener ( v->cstr () );
dListenerDescs.Add ( tDesc );
AddGlobalListener ( tDesc );
}
// default is to listen on our two ports
if ( g_dListeners.IsEmpty() )
{
AddGlobalListener ( MakeLocalhostListener ( SPHINXAPI_PORT, Proto_e::SPHINX ) );
AddGlobalListener ( MakeLocalhostListener ( SPHINXQL_PORT, Proto_e::MYSQL41 ) );
}
}
if ( !ValidateListenerRanges ( dListenerDescs, sError ) )
sphFatal ( "%s", sError.cstr() );
CSphString sSslCert ( hSearchd.GetStr ( "ssl_cert" ) );
CSphString sSslKey ( hSearchd.GetStr ( "ssl_key" ) );
CSphString sSslCa ( hSearchd.GetStr ( "ssl_ca" ) );
FixPathAbsolute ( sSslCert );
FixPathAbsolute ( sSslKey );
FixPathAbsolute ( sSslCa );
SetServerSSLKeys ( sSslCert, sSslKey, sSslCa );
CheckSSL();
// set up ping service (if necessary) before loading indexes
// (since loading ha-mirrors of distributed already assumes ping is usable).
if ( g_iPingIntervalUs>0 )
Ping::Start();
ScheduleMallocTrim();
CacheCPUInfo();
DetermineNodeItemStackSize();
DetermineFilterItemStackSize();
DetermineMatchStackSize();
// initialize timeouts since hook will use them
auto iRtFlushPeriodUs = hSearchd.GetUsTime64S ( "rt_flush_period", 36000000000ll ); // 10h
SetRtFlushPeriod ( Max ( iRtFlushPeriodUs, 3 * 1000000 ) ); // min 3S
g_pLocalIndexes->SetAddOrReplaceHook ( HookSubscribeMutableFlush );
//////////////////////
// build indexes hash
//////////////////////
// configure and preload
if ( bTestMode ) // pass this flag here prior to index config
sphRTSetTestMode();
if ( bForcePseudoSharding )
SetPseudoShardingThresh(0);
StrVec_t dExactIndexes;
for ( const auto &dOptIndex : dOptIndexes )
sphSplit ( dExactIndexes, dOptIndex.cstr (), "," );
SetPercolateQueryParserFactory ( PercolateQueryParserFactory );
Threads::CallCoroutine ( [&hConf, &dExactIndexes]() REQUIRES_SHARED ( g_tRotateConfigMutex )
{
ScopedRole_c thMain ( MainThread );
ConfigureAndPreloadOnStartup ( hConf, dExactIndexes );
} );
///////////
// startup
///////////
// ModifyDaemonPaths ( hSearchd );
// sphRTInit ( hSearchd, bTestMode, hConf("common") ? hConf["common"]("common") : nullptr );
if ( hSearchd.Exists ( "snippets_file_prefix" ) )
g_sSnippetsFilePrefix = hSearchd["snippets_file_prefix"].cstr();
else
g_sSnippetsFilePrefix.SetSprintf ( "%s/", g_sExePath.scstr() );
FixPathAbsolute ( g_sSnippetsFilePrefix );
{
auto sLogFormat = hSearchd.GetStr ( "query_log_format", "sphinxql" );
if ( sLogFormat=="sphinxql" )
g_eLogFormat = LOG_FORMAT_SPHINXQL;
else if ( sLogFormat!="plain" )
{
StrVec_t dParams;
sphSplit ( dParams, sLogFormat.cstr() );
for ( const auto& sParam : dParams )
{
if ( sParam=="sphinxql" )
g_eLogFormat = LOG_FORMAT_SPHINXQL;
else if ( sParam=="plain" )
g_eLogFormat = LOG_FORMAT_PLAIN;
else if ( sParam=="compact_in" )
g_bLogCompactIn = true;
}
}
}
if ( g_bLogCompactIn && g_eLogFormat==LOG_FORMAT_PLAIN )
sphWarning ( "compact_in option only supported with query_log_format=sphinxql" );
// prepare to detach
if ( !g_bOptNoDetach )
{
ReleaseTTYFlag();
#if !_WIN32
if ( !bWatched || bVisualLoad )
{
close ( STDIN_FILENO );
close ( STDOUT_FILENO );
close ( STDERR_FILENO );
dup2 ( iDevNull, STDIN_FILENO );
dup2 ( iDevNull, STDOUT_FILENO );
dup2 ( iDevNull, STDERR_FILENO );
}
#endif
}
if ( bOptPIDFile && !bWatched )
sphLockUn ( g_iPidFD );
Binlog::Configure ( hSearchd, uReplayFlags );
SetUidShort ( bTestMode );
InitDocstore ( g_iDocstoreCache );
InitSkipCache ( g_iSkipCache );
InitParserOption();
if ( bOptPIDFile )
{
#if !_WIN32
// re-lock pid
// FIXME! there's a potential race here
if ( !sphLockEx ( g_iPidFD, true ) )
sphFatal ( "failed to re-lock pid file '%s': %s", g_sPidFile.scstr(), strerrorm(errno) );
#endif
char sPid[16];
snprintf ( sPid, sizeof(sPid), "%d\n", (int)getpid() );
auto iPidLen = (int) strlen(sPid);
sphSeek ( g_iPidFD, 0, SEEK_SET );
if ( !sphWrite ( g_iPidFD, sPid, iPidLen ) )
sphFatal ( "failed to write to pid file '%s' (errno=%d, msg=%s)", g_sPidFile.scstr(),
errno, strerrorm(errno) );
if ( ::ftruncate ( g_iPidFD, iPidLen ) )
sphFatal ( "failed to truncate pid file '%s' (errno=%d, msg=%s)", g_sPidFile.scstr(),
errno, strerrorm(errno) );
}
#if _WIN32
SetConsoleCtrlHandler ( CtrlHandler, TRUE );
#endif
Threads::CallCoroutine( [bWatched] {
StrVec_t dFailed;
if ( !g_bOptNoDetach && !bWatched && !g_bService )
{
// re-lock indexes
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto& tIt : *hLocal )
{
sphLogDebug ( "Relocking %s", tIt.first.cstr () );
auto pServed = tIt.second;
// obtain exclusive lock
if ( !pServed )
{
dFailed.Add ( tIt.first );
continue;
}
RWIdx_c pIdx { pServed };
if ( !pIdx->Lock() )
{
sphWarning ( "table '%s': lock: %s; TABLE UNUSABLE", tIt.first.cstr(), pIdx->GetLastError().cstr() );
dFailed.Add ( tIt.first );
}
}
for ( const auto& sFailed : dFailed )
g_pLocalIndexes->Delete ( sFailed );
}
});
// if we're running in test console mode, dump queries to tty as well
// unless we're explicitly asked not to!
if ( hSearchd ( "query_log" ) && g_bOptNoLock && g_bOptNoDetach && bOptDebugQlog )
{
g_bQuerySyslog = false;
g_bLogSyslog = false;
g_iQueryLogFile = g_iLogFile;
}
#if USE_SYSLOG
if ( g_bLogSyslog || g_bQuerySyslog )
{
openlog ( "searchd", LOG_PID, LOG_DAEMON );
}
#else
if ( g_bQuerySyslog )
sphFatal ( "Wrong query_log file! You have to reconfigure --with-syslog and rebuild daemon if you want to use syslog there." );
#endif
/////////////////
// serve clients
/////////////////
#if _WIN32
if ( g_bService )
MySetServiceStatus ( SERVICE_RUNNING, NO_ERROR, 0 );
#endif
// replay last binlog
Threads::CallCoroutine ([]
{
auto _ = PublishSystemInfo ("replay binlog");
SmallStringHash_T<CSphIndex*> hIndexes;
ServedSnap_t hLocals = g_pLocalIndexes->GetHash();
for ( auto& tIt : *hLocals )
{
if ( tIt.second )
hIndexes.Add ( RWIdx_c ( tIt.second ), tIt.first );
}
Binlog::Replay ( hIndexes, DumpMemStat );
} );
// no need to create another cluster on restart by watchdog resurrection
if ( bWatched && !bVisualLoad )
{
bNewCluster = false;
bNewClusterForce = false;
}
StartRtBinlogFlushing();
ScheduleFlushAttrs();
SetupCompatHttp();
InitSearchdStats();
{
CSphString sSQLStateDefault;
if ( IsConfigless() )
sSQLStateDefault.SetSprintf ( "%s/state.sql", GetDataDirInt().cstr() );
CSphString sSQLStatePath { hSearchd.GetStr ( "sphinxql_state", sSQLStateDefault.scstr() ) };
FixPathAbsolute ( sSQLStatePath );
if ( !InitSphinxqlState ( sSQLStatePath, sError ))
sphWarning ( "sphinxql_state flush disabled: %s", sError.cstr ());
}
ServeUserVars ();
PrereadIndexes ( bForcedPreread );
// almost ready, time to start listening
g_iBacklog = hSearchd.GetInt ( "listen_backlog", g_iBacklog );
for ( const auto& dListener : g_dListeners )
if ( listen ( dListener.m_iSock, g_iBacklog )==-1 )
{
if ( sphSockGetErrno()==EADDRINUSE )
sphFatal ( "listen() failed with EADDRINUSE. A listener with other UID on same address:port?");
else
sphFatal ( "listen() failed: %s", sphSockError () );
}
// net thread needs non-blocking sockets
for ( const auto& dListener : g_dListeners )
{
if ( sphSetSockNB ( dListener.m_iSock )<0 )
{
sphWarning ( "sphSetSockNB() failed: %s", sphSockError() );
sphSockClose ( dListener.m_iSock );
}
if ( ( g_iTFO!=TFO_ABSENT ) && ( g_iTFO & TFO_LISTEN ) )
sphSetSockTFO ( dListener.m_iSock );
}
g_pTickPoolThread = Threads::MakeThreadPool ( g_iNetWorkers, "TickPool" );
WipeSchedulerOnFork ( g_pTickPoolThread );
PrepareClustersOnStartup ( dListenerDescs, bNewClusterForce );
g_dNetLoops.Resize ( g_iNetWorkers );
for ( auto & pNetLoop : g_dNetLoops )
{
pNetLoop = new CSphNetLoop;
pNetLoop->SetListeners ( g_dListeners );
if ( !GetAvailableNetLoop() )
SetAvailableNetLoop ( pNetLoop );
g_pTickPoolThread->Schedule ( [pNetLoop] { ScopedRole_c thPoll ( NetPoollingThread ); pNetLoop->LoopNetPoll (); }, false );
}
// while no threads are started yet, schedule the stopping of detached 'alone' threads at the very bottom
WipeGlobalSchedulerOnShutdownAndFork();
Detached::MakeAloneIteratorAvailable ();
// time for replication to sync with cluster
searchd::AddShutdownCb ( ReplicationServiceShutdown );
ReplicationServiceStart ( bNewCluster || bNewClusterForce );
searchd::AddShutdownCb ( BuddyShutdown );
// --test should not guess the buddy path,
// otherwise the daemon generates a warning message that ubertest counts as a bad daemon restart
if ( !bTestMode )
BuddyStart ( g_sBuddyPath, PluginGetDir(), g_bHasBuddyPath, dListenerDescs, g_bTelemetry, g_iThreads, g_sConfigFile, RealPath ( GetDataDirInt() ) );
g_bJsonConfigLoadedOk = true;
dListenerDescs.Reset(); // make valgrind happy
// ready, steady, go
sphInfo ( "accepting connections" );
// disable startup logging to stdout
if ( !g_bOptNoDetach )
g_bLogStdout = false;
while (true)
{
CrashLogger::SetupTimePID();
TickHead();
}
} // NOLINT ServiceMain() function length
inline int mainimpl ( int argc, char **argv )
{
// threads should be initialized before memory allocations
char cTopOfMainStack;
Threads::Init();
PrepareMainThread ( &cTopOfMainStack );
sphSetDieCallback ( DieOrFatalCb );
g_pLogger() = sphLog;
sphBacktraceSetBinaryName ( argv[0] );
GeodistInit();
#if _WIN32
int iNameIndex = -1;
for ( int i=1; i<argc; i++ )
{
if ( strcmp ( argv[i], "--ntservice" )==0 )
g_bService = true;
if ( strcmp ( argv[i], "--servicename" )==0 && (i+1)<argc )
{
iNameIndex = i+1;
g_sServiceName = argv[iNameIndex];
}
}
if ( g_bService )
{
for ( int i=0; i<argc; i++ )
g_dArgs.Add ( argv[i] );
if ( iNameIndex>=0 )
g_sServiceName = g_dArgs[iNameIndex].cstr ();
SERVICE_TABLE_ENTRY dDispatcherTable[] =
{
{ (LPSTR) g_sServiceName, (LPSERVICE_MAIN_FUNCTION)ServiceMain },
{ NULL, NULL }
};
if ( !StartServiceCtrlDispatcher ( dDispatcherTable ) )
sphFatal ( "StartServiceCtrlDispatcher() failed: %s", WinErrorInfo() );
return 0;
} else
#endif
return ServiceMain ( argc, argv );
}
#ifndef SUPRESS_SEARCHD_MAIN
int main ( int argc, char ** argv )
{
return mainimpl ( argc, argv );
}
#endif
volatile bool& sphGetGotSighup() noexcept
{
static bool bGotSighup = false;
return bGotSighup;
}
volatile bool& sphGetGotSigusr1() noexcept
{
static bool bGotSigusr1 = false;
return bGotSigusr1;
}
volatile bool & sphGetGotSigusr2 () noexcept
{
static bool bGotSigusr2 = false;
return bGotSigusr2;
}
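// A minimal illustrative sketch (not part of the original source) of how these
// accessors are meant to be used: a signal handler flips the flag, and the main
// loop polls and clears it later, e.g.:
//
//   static void HandleSigusr1 ( int ) { sphGetGotSigusr1() = true; }
//   ...
//   signal ( SIGUSR1, HandleSigusr1 ); // hypothetical registration site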
| 653,799 | C++ | .cpp | 18,059 | 33.386677 | 350 | 0.689919 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,902 | sphinxql_debug.cpp | manticoresoftware_manticoresearch/src/sphinxql_debug.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxql_debug.h"
struct BlobLocator_t
{
int m_iStart;
int m_iLen;
};
using namespace DebugCmd;
class SqlDebugParser_c final : public SqlParserTraits_c
{
DebugCommand_t& m_tCmd;
public:
SqlDebugParser_c ( DebugCommand_t& tCmd, Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError )
: SqlParserTraits_c ( dStmt, sQuery.first, &sError )
, m_tCmd ( tCmd )
{
m_sErrorHeader = "P04:";
}
CSphString StrFromBlob ( BlobLocator_t tStr ) const
{
CSphString sResult;
sResult.SetBinary(m_pBuf+tStr.m_iStart, tStr.m_iLen);
return sResult;
}
ParsedOption_t& OptByName ( BlobLocator_t tStr )
{
auto sOption = StrFromBlob ( tStr );
sOption.ToLower ();
return m_tCmd.m_hOptions.AddUnique ( sOption );
}
void AddBoolOption ( BlobLocator_t tStr, bool bValue = true )
{
auto& tOption = OptByName ( tStr );
if ( bValue )
{
tOption.m_bValue = true;
tOption.m_iValue = 1;
tOption.m_fValue = 1.0f;
tOption.m_sValue = "true";
} else {
tOption.m_bValue = false;
tOption.m_iValue = 0;
tOption.m_fValue = 0.0f;
tOption.m_sValue = "false";
}
}
void AddStrOption ( BlobLocator_t tStr, BlobLocator_t tValue )
{
auto & tOption = OptByName ( tStr );
tOption.m_sValue = StrFromBlob ( tValue );
tOption.m_bValue = !tOption.m_sValue.IsEmpty ();
}
void AddStrOption ( const char* szStr, BlobLocator_t tValue )
{
auto& tOption = m_tCmd.m_hOptions.AddUnique ( szStr );
tOption.m_sValue = StrFromBlob ( tValue );
tOption.m_bValue = !tOption.m_sValue.IsEmpty();
}
void AddIntOption ( BlobLocator_t tStr, int64_t iValue )
{
auto & tOption = OptByName ( tStr );
tOption.m_iValue = iValue;
tOption.m_fValue = float(iValue);
tOption.m_bValue = iValue!=0;
}
void AddFloatOption ( BlobLocator_t tStr, float fValue )
{
auto & tOption = OptByName ( tStr );
tOption.m_fValue = fValue;
tOption.m_iValue = int ( fValue );
tOption.m_bValue = tOption.m_iValue!=0;
}
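	// Editorial note (not in the original source): every setter above mirrors the
	// value into all typed fields, so a later typed getter never reads an
	// uninitialized field; e.g. after AddIntOption ( tName, 10 ) the option holds
	// m_iValue==10, m_fValue==10.0f, m_bValue==true.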
void SetWrongDebugCmd ()
{
SetCommand ( Cmd_e::PARSE_SYNTAX_ERROR );
}
void SetCommand ( Cmd_e eCmd )
{
m_tCmd.m_eCommand = eCmd;
}
bool CommandIs ( Cmd_e eCmd ) const noexcept
{
return m_tCmd.m_eCommand == eCmd;
}
void SetPar1 ( int64_t iPar )
{
m_tCmd.m_iPar1 = iPar;
}
void SetPar2 ( int64_t iPar )
{
m_tCmd.m_iPar2 = iPar;
}
void SetSParam ( BlobLocator_t tStr )
{
m_tCmd.m_sParam = StrFromBlob ( tStr );
}
void SetSParam2 ( BlobLocator_t tStr )
{
m_tCmd.m_sParam2 = StrFromBlob ( tStr );
}
};
bool DebugCommand_t::bOpt ( const char * szName, bool bDefault ) const
{
if ( !m_hOptions.Exists ( szName ) )
return bDefault;
return m_hOptions[szName].m_bValue;
}
int64_t DebugCommand_t::iOpt ( const char * szName, int64_t iDefault ) const
{
if ( !m_hOptions.Exists ( szName ) )
return iDefault;
return m_hOptions[szName].m_iValue;
}
float DebugCommand_t::fOpt ( const char * szName, float fDefault ) const
{
if ( !m_hOptions.Exists ( szName ) )
return fDefault;
return m_hOptions[szName].m_fValue;
}
CSphString DebugCommand_t::sOpt ( const char * szName, const char * szDefault ) const
{
CSphString sResult;
if ( !m_hOptions.Exists ( szName ) )
sResult = szDefault;
else
sResult = m_hOptions[szName].m_sValue;
return sResult;
}
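// Illustrative usage sketch (the option names here are hypothetical): the typed
// getters fall back to the supplied default when an option was not given:
//
//   int64_t iLimit = tCmd.iOpt ( "limit", 100 );    // 100 unless 'limit' was parsed
//   bool bSync     = tCmd.bOpt ( "sync" );          // false unless 'sync' was parsed
//   CSphString sTo = tCmd.sOpt ( "to", "stdout" );  // "stdout" unless 'to' was parsed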
// unused parameter, simply to avoid type clash between all my yylex() functions
#define YY_DECL inline int flex_debugparser ( YYSTYPE * lvalp, void * yyscanner, SqlDebugParser_c * pParser )
#include "flexsphinxql_debug.c"
static void yyerror ( SqlDebugParser_c* pParser, const char* szMessage )
{
	// flex puts a zero at the last token boundary; make it undo that
yy4lex_unhold ( pParser->m_pScanner );
pParser->ProcessParsingError ( szMessage );
if ( pParser->IsWrongSyntaxError() )
pParser->SetWrongDebugCmd();
}
#ifndef NDEBUG
// use a proxy so that it is possible to debug inside yylex
inline int yylex ( YYSTYPE * lvalp, SqlDebugParser_c * pParser )
{
int res = flex_debugparser ( lvalp, pParser->m_pScanner, pParser );
return res;
}
#else
inline int yylex ( YYSTYPE * lvalp, SqlDebugParser_c * pParser )
{
return flex_debugparser ( lvalp, pParser->m_pScanner, pParser );
}
#endif
#include "bissphinxql_debug.c"
static std::unique_ptr<DebugCmd::DebugCommand_t> ParseDebugCmdImpl ( Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError )
{
auto pResult = std::make_unique<DebugCmd::DebugCommand_t>();
auto& tResult = *pResult;
if ( !IsFilled ( sQuery ) )
{
tResult.m_sParam = "query was empty";
return pResult;
}
SqlDebugParser_c tParser ( tResult, sQuery, dStmt, sError );
tParser.m_pBuf = sQuery.first;
tResult.m_szStmt = sQuery.first;
char * sEnd = const_cast<char *>( sQuery.first+sQuery.second );
sEnd[0] = 0; // prepare for yy_scan_buffer
sEnd[1] = 0; // this is ok because string allocates a small gap
yy4lex_init ( &tParser.m_pScanner );
YY_BUFFER_STATE tLexerBuffer = yy4_scan_buffer ( const_cast<char *>( sQuery.first ), sQuery.second+2, tParser.m_pScanner );
if ( !tLexerBuffer )
{
tResult.m_sParam = "internal error: yy4_scan_buffer() failed";
return pResult;
}
yyparse ( &tParser );
yy4_delete_buffer ( tLexerBuffer, tParser.m_pScanner );
yy4lex_destroy ( tParser.m_pScanner );
return pResult;
}
ParseResult_e ParseDebugCmd ( Str_t sQuery, CSphVector<SqlStmt_t>& dStmt, CSphString& sError )
{
// parse debug statements
auto pCmd = ParseDebugCmdImpl ( sQuery, dStmt, sError );
if ( !pCmd->Valid() )
return ParseResult_e::PARSE_SYNTAX_ERROR;
auto& tStmt = dStmt.Add();
tStmt.m_pDebugCmd = std::move ( pCmd );
tStmt.m_eStmt = STMT_DEBUG;
return ParseResult_e::PARSE_OK;
}
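// Illustrative usage sketch (not part of the original source; FromStr and the
// handler call are assumptions for the example):
//
//   CSphVector<SqlStmt_t> dStmt;
//   CSphString sError;
//   CSphString sQuery = "debug sleep 1";
//   if ( ParseDebugCmd ( FromStr ( sQuery ), dStmt, sError )==ParseResult_e::PARSE_OK )
//       HandleDebugCmd ( *dStmt[0].m_pDebugCmd ); // hypothetical handler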
| 6,069 | C++ | .cpp | 201 | 27.960199 | 133 | 0.718428 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

16,903 | searchdreplication.cpp | manticoresoftware_manticoresearch/src/searchdreplication.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxstd.h"
#include "sphinxutils.h"
#include "memio.h"
#include "sphinxpq.h"
#include "searchdreplication.h"
#include "replication/configuration.h"
#include "accumulator.h"
#include "fileutils.h"
#include "coroutine.h"
#include "digest_sha1.h"
#include "tracer.h"
#include "replication/wsrep_cxx.h"
#include "replication/common.h"
#include "replication/portrange.h"
#include "replication/nodes.h"
#include "replication/commit_monitor.h"
#include "replication/receiver_ctx.h"
#include "replication/serialize.h"
#include "replication/cluster_delete.h"
#include "replication/cluster_get_nodes.h"
#include "replication/cluster_update_nodes.h"
#include "replication/cluster_synced.h"
#include "replication/replicate_index.h"
#include "replication/grastate.h"
#if !_WIN32
// MAC-specific header
#include <netinet/in.h>
#endif
// global application context for wsrep callbacks
// debug options passed into Galera for our logreplication command line key
static const char * const g_sDebugOptions = "debug=on;cert.log_conflicts=yes";
// prefix added for Galera nodes
static const char * const g_sGcommPrefix = "gcomm://";
// verbose logging of replicated transactions, controlled by this env variable
static const bool LOG_LEVEL_RPL_TNX = val_from_env ( "MANTICORE_LOG_RPL_TNX", false );
#define LOG_COMPONENT_RPL_TNX ""
#define RPL_TNX LOGMSG ( RPL_DEBUG, RPL_TNX, RPL_TNX )
inline static bool IsSyncedOrDonor ( ClusterState_e eState ) noexcept
{
return ( eState == ClusterState_e::DONOR || eState == ClusterState_e::SYNCED );
}
enum class BOOTSTRAP_E : bool {NO,YES};
// cluster related data
struct ReplicationCluster_t final : public ClusterDesc_t, Wsrep::Cluster_i
{
public:
// replicator
Wsrep::Provider_i* m_pProvider = nullptr;
// serializer for replicator - guards for only one replication Op a time
Threads::Coro::Mutex_c m_tReplicationMutex;
// receiver thread
Threads::Coro::Waitable_T<bool> m_bWorkerActive { false };
	// Galera port taken from the global ports list when the cluster is created
ScopedPort_c m_tPort;
	bool m_bUserRequest { false }; // indicates whether the cluster is joining by user request (i.e. a 'JOIN CLUSTER ...' statement)
// state variables cached from Galera
Wsrep::UUID_t m_dUUID {};
int64_t m_iConfID = 0;
Wsrep::ViewStatus_e m_eStatus = Wsrep::ViewStatus_e::DISCONNECTED;
int m_iSize = 0;
int m_iIdx = 0;
// error that got reported to main thread
CSphMutex m_tErrorLock;
StringBuilder_c m_sError GUARDED_BY ( m_tErrorLock ) { ";" };
private:
~ReplicationCluster_t() final; // private since ref-counted
void AbortSST();
public:
explicit ReplicationCluster_t ( ClusterDesc_t&& tDesc )
{
m_sName = std::move ( tDesc.m_sName );
m_sPath = std::move ( tDesc.m_sPath );
m_dClusterNodes = std::move ( tDesc.m_dClusterNodes );
m_tOptions = std::move ( tDesc.m_tOptions );
}
// state of node
void SetState ( ClusterState_e eNodeState );
ClusterState_e GetState() const noexcept;
const char* szState() const noexcept;
ClusterState_e WaitReady();
bool IsHealthy() const;
bool Init();
bool Connect ( BOOTSTRAP_E eBootStrap );
void StartListen();
void DisconnectAndDeleteProvider();
void UpdateGroupView ( const Wsrep::ViewInfo_t* pView ) EXCLUDES ( m_tViewNodesLock );
StrVec_t FilterViewNodesByProto ( Proto_e eProto = Proto_e::SPHINX, bool bResolve = true ) const EXCLUDES ( m_tViewNodesLock );
StrVec_t GetViewNodes() const EXCLUDES ( m_tViewNodesLock );
void SetViewNodes ( StrVec_t&& dNodes );
StrVec_t GetIndexes() const noexcept EXCLUDES ( m_tIndexLock );
const CSphString & GetNodeName () const { return m_sNodeName; }
template<typename ACTION>
void FilterViewNodes ( ACTION&& Verb ) const
{
Threads::SccRL_t tNodesRLock ( m_tViewNodesLock );
m_dViewNodes.Apply ( std::forward<ACTION> ( Verb ) );
}
void ShowStatus ( VectorLike& dOut );
void HeartBeat ()
{
		m_tHeartBeat.NotifyAll();
}
template<typename PRED>
void WaitHeartBeat ( PRED&& fnPred )
{
		m_tHeartBeat.WaitVoid ( std::forward<PRED> ( fnPred ) );
}
template<typename PRED>
bool WaitHeartBeatForMs ( PRED&& fnPred, int64_t iPeriodMs )
{
		return m_tHeartBeat.WaitVoidForMs ( std::forward<PRED> ( fnPred ), iPeriodMs );
}
template<typename PRED>
ClusterState_e WaitAny (PRED&& fnPred)
{
return m_tNodeState.Wait ( std::forward<PRED> ( fnPred ) );
}
template<typename PRED>
ClusterState_e WaitAnyForMs (PRED&& fnPred, int64_t iPeriodMs )
{
return m_tNodeState.WaitForMs ( std::forward<PRED> ( fnPred ), iPeriodMs );
}
/// Cluster_i implementation
///////////////////////////
void ChangeView ( const Wsrep::ViewInfo_t* pView, const char* pState, uint64_t iStateLen, void** ppSstReq, uint64_t* pSstReqLen ) final;
	// callback for Galera synced_cb: the cluster is fully synced and can accept transactions
void SetSynced() final;
bool DonateSST ( CSphString sJoiner, const Wsrep::GlobalTid_t* pStateID, bool bBypass ) final;
void OnRecvStarted() final;
void OnRecvFinished ( bool bSuccess ) final;
bool IsPrimary() const noexcept { return ( m_eStatus == Wsrep::ViewStatus_e::PRIMARY ); }
template<typename VISITOR>
auto WithRlockedIndexes ( VISITOR fnVisitor ) const noexcept EXCLUDES ( m_tIndexLock )
{
Threads::SccRL_t tIndexRLock ( m_tIndexLock );
return fnVisitor ( m_hIndexes );
}
template<typename VISITOR>
auto WithRlockedAllIndexes ( VISITOR fnVisitor ) const noexcept EXCLUDES ( m_tIndexLock )
{
Threads::SccRL_t tIndexRLock ( m_tIndexLock );
return fnVisitor ( m_hIndexes, m_hIndexesLoaded );
}
template<typename VISITOR>
auto WithWlockedIndexes ( VISITOR fnVisitor ) EXCLUDES ( m_tIndexLock )
{
Threads::SccWL_t tIndexWLock ( m_tIndexLock );
return fnVisitor ( m_hIndexes, m_hIndexesLoaded );
}
template<typename VISITOR>
auto WithRlockedOptions ( VISITOR fnVisitor ) const noexcept EXCLUDES ( m_tOptsLock )
{
Threads::SccRL_t tOptionsRLock ( m_tOptsLock );
return fnVisitor ( m_tOptions );
}
template<typename VISITOR>
auto WithWlockedOptions ( VISITOR fnVisitor ) EXCLUDES ( m_tOptsLock )
{
Threads::SccWL_t tOptionsWLock ( m_tOptsLock );
return fnVisitor ( m_tOptions );
}
private:
Threads::Coro::Waitable_T<ClusterState_e> m_tNodeState { ClusterState_e::CLOSED };
	Threads::Coro::Waitable_T<bool> m_tHeartBeat { false };
	// nodes of the cluster
	// raw node addresses (API and replication) from the whole cluster
mutable Threads::Coro::RWLock_c m_tViewNodesLock;
StrVec_t m_dViewNodes GUARDED_BY ( m_tViewNodesLock );
mutable Threads::Coro::RWLock_c m_tOptsLock;
mutable Threads::Coro::RWLock_c m_tIndexLock;
	sph::StringSet m_hIndexesLoaded; // list of index names loaded into the daemon but not yet in the cluster; used by a donor to send indexes to a joiner
#ifndef NDEBUG
	// it is impossible to attach GUARDED_BY to 'using ClusterDesc_t::m_tOptions', so use a reference for it in debug builds
ClusterOptions_t& m_tOptions GUARDED_BY ( m_tOptsLock ) { ClusterDesc_t::m_tOptions };
sph::StringSet& m_hIndexes GUARDED_BY ( m_tIndexLock ) { ClusterDesc_t::m_hIndexes }; // to quickly validate query to cluster:index
#else
using ClusterDesc_t::m_tOptions;
using ClusterDesc_t::m_hIndexes;
#endif
CSphString m_sNodeName;
};
using ReplicationClusterRefPtr_c = CSphRefcountedPtr<ReplicationCluster_t>;
// serializer for cluster management operations - only one cluster operation at a time
static Threads::Coro::Mutex_c g_tClusterOpsLock;
// cluster list
static Threads::Coro::RWLock_c g_tClustersLock;
static SmallStringHash_T<ReplicationClusterRefPtr_c> g_hClusters GUARDED_BY ( g_tClustersLock );
ReplicationClusterRefPtr_c ClusterByName ( const CSphString& sCluster, const char* szErrTmpl = "unknown cluster '%s'" ) EXCLUDES ( g_tClustersLock )
{
ReplicationClusterRefPtr_c pCluster;
Threads::SccRL_t tLock ( g_tClustersLock );
if ( !g_hClusters.Exists ( sCluster ) )
{
if ( szErrTmpl )
TlsMsg::Err ( szErrTmpl, sCluster.cstr() );
} else
pCluster = g_hClusters[sCluster];
return pCluster;
}
static bool CheckClusterIndexes ( const VecTraits_T<CSphString> & dIndexes, ReplicationClusterRefPtr_c pCluster )
{
auto fnCmdValidate = [&]( const auto & hIndexes )
{
for ( const CSphString & sIndex : dIndexes )
{
if ( !hIndexes[sIndex] )
return TlsMsg::Err ( "table '%s' doesn't belong to cluster '%s'", sIndex.cstr(), pCluster->m_sName.cstr() );
}
return true;
};
return pCluster->WithRlockedIndexes ( fnCmdValidate );
}
static bool CheckClusterIndex ( const CSphString & sIndex, ReplicationClusterRefPtr_c pCluster )
{
return pCluster->WithRlockedIndexes ( [&]( const auto & hIndexes )
{
if ( !hIndexes[sIndex] )
return TlsMsg::Err ( "table '%s' doesn't belong to cluster '%s'", sIndex.cstr(), pCluster->m_sName.cstr() );
return true;
});
}
/////////////////////////////////////////////////////////////////////////////
/// forward declarations
/////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////
/// remote commands for cluster and index management
/////////////////////////////////////////////////////////////////////////////
// send local indexes to remote nodes via API
static bool SendClusterIndexes ( const ReplicationCluster_t * pCluster, const CSphString & sNode, bool bBypass, const Wsrep::GlobalTid_t & tStateID );
static bool ValidateUpdate ( const ReplicationCommand_t & tCmd );
static bool DoClusterAlterUpdate ( const CSphString & sCluster, const CSphString & sUpdate, NODES_E eUpdate );
static bool IsSameVector ( StrVec_t & dSrc, StrVec_t & dDst );
static bool g_bReplicationEnabled = false;
static CSphString g_sReplicationStartError;
static bool g_bReplicationStarted = false;
bool ReplicationEnabled()
{
return g_bReplicationEnabled;
}
static constexpr const char * szNodeState ( ClusterState_e eState )
{
switch ( eState )
{
case ClusterState_e::CLOSED: return "closed";
case ClusterState_e::DESTROYED: return "destroyed";
case ClusterState_e::JOINING: return "joining";
case ClusterState_e::DONOR: return "donor";
case ClusterState_e::SYNCED: return "synced";
default: return "undefined";
}
}
const char* ReplicationCluster_t::szState() const noexcept
{
return ::szNodeState ( GetState() );
}
void ReplicationCluster_t::SetState ( ClusterState_e eNodeState )
{
m_tNodeState.SetValue ( eNodeState );
m_tNodeState.NotifyAll();
}
ClusterState_e ReplicationCluster_t::GetState() const noexcept
{
return m_tNodeState.GetValue();
}
ClusterState_e ReplicationCluster_t::WaitReady()
{
return WaitAny ( [] ( ClusterState_e i ) { return i == ClusterState_e::SYNCED || i == ClusterState_e::DESTROYED; } );
}
bool ReplicationCluster_t::IsHealthy() const
{
if ( !IsPrimary() )
return TlsMsg::Err ( "cluster '%s' is not ready, not primary state (%s)", m_sName.cstr(), szState() );
auto eState = GetState();
return eState == ClusterState_e::SYNCED
|| eState == ClusterState_e::DONOR
|| TlsMsg::Err ( "cluster '%s' is not ready, current state is %s", m_sName.cstr(), szState() );
}
static int GetClusterMemLimitMB ( int iMemLimit, int iIndexes )
{
const int CACHE_PER_INDEX = 16;
const int MIN_CACHE_SIZE = 128;
	// default cache size: 16 MB per added index or the largest rt_mem_limit among the RT indexes, whichever is bigger, but at least 128 MB
int iSize = iMemLimit / 1024 / 1024;
iIndexes = Max ( 1, iIndexes );
return Max ( Max ( iIndexes * CACHE_PER_INDEX, iSize ), MIN_CACHE_SIZE );
}
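// Worked example (illustrative): with the largest rt_mem_limit at 256 MB across
// 4 indexes, iSize==256 and the per-index floor is 4*16==64, so the result is
// Max ( Max ( 64, 256 ), 128 ) == 256 MB; with a single tiny index the 128 MB
// minimum wins instead.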
bool ReplicationCluster_t::Init()
{
assert ( ReplicationEnabled() );
CSphString sListenAddr, sIncoming, sFullClusterPath;
m_sNodeName.SetSprintf ( "node_%s_%s_%d", szIncomingIP(), m_sName.cstr(), GetOsThreadId() );
sListenAddr.SetSprintf ( "%s:%d", szListenReplicationIP(), int ( m_tPort ) );
sIncoming.SetSprintf ( "%s,%s:%d:replication", szIncomingProto(), szIncomingIP(), int ( m_tPort ) );
sFullClusterPath = GetDatadirPath ( m_sPath );
sphLogDebugRpl ( "node incoming '%s', listen '%s', name '%s'", sIncoming.cstr(), sListenAddr.cstr(), m_sNodeName.cstr() );
StringBuilder_c sOptions ( ";" );
WithRlockedOptions ( [&sOptions] ( const auto& tOptions ) { sOptions << tOptions.AsStr(); } );
	// none of the replication options below are stored in the cluster config
// set incoming address
if ( HasIncoming() )
{
sOptions.Sprintf ( "ist.recv_addr=%s", szIncomingIP() );
sOptions << "ist.recv_bind=0.0.0.0";
}
	// if gcache.size was not set explicitly, derive it from the index count and the largest rt_mem_limit
bool bHaveGcache = false;
WithRlockedOptions ( [&bHaveGcache] ( const auto& tOptions ) { bHaveGcache = tOptions.m_hOptions.Exists ( "gcache.size" ); } );
if ( !bHaveGcache )
{
int iMaxMemLimit = 0;
int iIndexes = 0;
WithRlockedIndexes ( [&iMaxMemLimit, &iIndexes] ( const auto& hIndexes ) {
iIndexes = hIndexes.GetLength();
for_each ( hIndexes, [&iMaxMemLimit] ( const auto& hIndex ) {
cServedIndexRefPtr_c pServed = GetServed ( hIndex.first );
if ( pServed && pServed->m_tSettings.m_iMemLimit > iMaxMemLimit )
iMaxMemLimit = pServed->m_tSettings.m_iMemLimit;
} );
} );
int iSizeMB = GetClusterMemLimitMB ( iMaxMemLimit, iIndexes );
sOptions.Sprintf ( "gcache.size=%dM", iSizeMB );
}
// set debug log option
if ( g_eLogLevel >= SPH_LOG_RPL_DEBUG )
sOptions += g_sDebugOptions;
m_pProvider = Wsrep::MakeProvider ( this, m_sNodeName, sListenAddr.cstr(), sIncoming.cstr(), sFullClusterPath.cstr(), sOptions.cstr() );
return bool ( m_pProvider );
}
bool ReplicationCluster_t::Connect ( BOOTSTRAP_E eBootStrap )
{
StringBuilder_c sNodes;
sNodes << g_sGcommPrefix;
sNodes.StartBlock ( "," );
FilterViewNodesByProto ( Proto_e::REPLICATION ).Apply ( [&sNodes] ( const CSphString& sNode ) { sNodes << sNode; } );
sphLogDebugRpl ( "nodes '%s'", sNodes.cstr() );
if ( g_eLogLevel >= SPH_LOG_RPL_DEBUG ) {
StringBuilder_c sIndexes ( "," );
WithRlockedIndexes ( [&sIndexes] ( const auto& hIndexes ) { for_each ( hIndexes, [&] ( const auto& tIndex ) { sIndexes << tIndex.first; } ); } );
sphLogDebugRpl ( "cluster '%s', indexes '%s', nodes '%s'", m_sName.cstr(), sIndexes.cstr(), sNodes.cstr() );
}
// Connect to cluster
return m_pProvider->Connect ( m_sName.cstr(), sNodes.cstr(), ( eBootStrap == BOOTSTRAP_E::YES ) );
}
void ReplicationCluster_t::StartListen()
{
CSphRefcountedPtr<Wsrep::Receiver_i> pReceiver { MakeReceiverCtx ( m_sName, m_pProvider, [this]() { HeartBeat(); } ) };
m_pProvider->StartListen ( pReceiver.Ptr() );
sphLogDebugRpl ( "replicator is created for cluster '%s'", m_sName.cstr() );
}
// update the cluster view nodes from the Galera callback on cluster view changes
void ReplicationCluster_t::UpdateGroupView ( const Wsrep::ViewInfo_t* pView )
{
const auto* pBoxes = &pView->m_tMemInfo;
StrVec_t dNodes;
for ( int i = 0; i < pView->m_iNMembers; ++i )
dNodes.Append ( ParseNodesFromString ( pBoxes[i].m_sIncoming ) );
sphLogDebugRpl ( "cluster '%s' view nodes changed: %s > %s", m_sName.cstr(), StrVec2Str ( GetViewNodes() ).cstr(), StrVec2Str ( dNodes ).cstr() );
SetViewNodes ( std::move ( dNodes ) );
}
CSphString WaitClusterReady ( const CSphString& sCluster, int64_t iTimeoutS )
{
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster )
return TlsMsg::MoveToString();
ClusterState_e eState;
if ( iTimeoutS<=0 )
eState = pCluster->WaitAny ( [] ( ClusterState_e i ) { return i == ClusterState_e::SYNCED || i == ClusterState_e::DONOR; } );
else
eState = pCluster->WaitAnyForMs ( [] ( ClusterState_e i ) { return i == ClusterState_e::SYNCED || i == ClusterState_e::DONOR; }, iTimeoutS*1000 );
return szNodeState ( eState );
}
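// Illustrative usage sketch (not in the original source): block until the
// cluster becomes usable (or the wait times out), then inspect the state name:
//
//   CSphString sState = WaitClusterReady ( "posts", 60 ); // e.g. "synced" or "donor"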
std::pair<int, CSphString> WaitClusterCommit ( const CSphString& sCluster, int iTxn, int64_t iTimeoutS )
{
auto pCluster = ClusterByName ( sCluster );
if (!pCluster)
return {-1,TlsMsg::MoveToString()};
int64_t iVal = -1;
auto fnPred = [iTxn, &iVal, pProvider=pCluster->m_pProvider]()
{
if ( !pProvider )
return false;
assert ( pProvider );
pProvider->EnumFindStatsVar ( [&iVal] ( const Wsrep::StatsVars_t& tVar ) -> bool {
if ( 0 != strcmp ( tVar.m_szName, "last_committed" ) )
return false;
iVal = tVar.m_tValue.iVal64;
return true;
} );
return iVal >= iTxn;
};
bool bSuccess = true;
if ( iTimeoutS <= 0 )
pCluster->WaitHeartBeat ( std::move ( fnPred ) );
else
bSuccess = pCluster->WaitHeartBeatForMs ( std::move ( fnPred ), iTimeoutS * 1000 );
if ( !bSuccess )
return { iVal, "timeout" };
return { iVal, "" };
}
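// Illustrative usage sketch (not in the original source): wait until the
// cluster's Galera 'last_committed' counter reaches a given transaction id:
//
//   auto [iSeqNo, sErr] = WaitClusterCommit ( "posts", 42, 30 );
//   // on success iSeqNo>=42 and sErr is empty; sErr=="timeout" if 30s elapsed first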
// callback for Galera when the cluster view has changed, i.e. a node was either added to or removed from the cluster.
// It is guaranteed that no other callbacks are called concurrently with it.
void ReplicationCluster_t::ChangeView ( const Wsrep::ViewInfo_t* pView, const char* pState, uint64_t uStateLen, void** ppSstReq, uint64_t* pSstReqLen )
{
m_dUUID = pView->m_tStateId.m_tUuid;
m_iConfID = pView->m_ViewSeqNo;
m_iSize = pView->m_iNMembers;
m_iIdx = pView->m_iIdx;
m_eStatus = pView->m_eStatus;
if ( IsPrimary() )
UpdateGroupView ( pView );
*ppSstReq = nullptr;
*pSstReqLen = 0;
if ( !pView->m_bGap )
return;
auto sAddr = FromSz ( szIncomingProto() );
sphLogDebugRpl ( "join %s to %s", sAddr.first, m_sName.cstr() );
*ppSstReq = memcpy ( malloc ( sAddr.second ), sAddr.first, sAddr.second ); // mem will be freed by Galera
*pSstReqLen = sAddr.second;
SetState ( ClusterState_e::JOINING );
}
// callback for Galera synced_cb: the cluster is fully synced and can accept transactions
void ReplicationCluster_t::SetSynced()
{
SetState ( ClusterState_e::SYNCED );
sphLogDebugRpl ( "synced cluster %s", m_sName.cstr() );
}
bool ReplicationCluster_t::DonateSST ( CSphString sJoiner, const Wsrep::GlobalTid_t* pStateID, bool bBypass )
{
auto tGtid = *pStateID;
sphLogDebugRpl ( "donate %s to %s, gtid %s, bypass %d", m_sName.cstr(), sJoiner.cstr(), Wsrep::Gtid2Str ( tGtid ).cstr(), (int)bBypass );
SetState ( ClusterState_e::DONOR );
const bool bOk = SendClusterIndexes ( this, sJoiner, bBypass, tGtid );
SetState ( ClusterState_e::SYNCED );
if ( !bOk ) {
sphWarning ( "%s", TlsMsg::szError() );
tGtid.m_iSeqNo = Wsrep::WRONG_SEQNO;
}
m_pProvider->SstSent ( tGtid, bOk );
sphLogDebugRpl ( "donate cluster %s to %s, gtid %s, bypass %d, done %d", m_sName.cstr(), sJoiner.cstr(), Wsrep::Gtid2Str ( *pStateID ).cstr(), (int)bBypass, (int)bOk );
return bOk;
}
void ReplicationCluster_t::OnRecvStarted()
{
m_bWorkerActive.SetValue ( true );
SetState ( ClusterState_e::JOINING );
}
void ReplicationCluster_t::OnRecvFinished ( bool bSuccess )
{
SetState ( bSuccess ? ClusterState_e::CLOSED : ClusterState_e::DESTROYED );
m_bWorkerActive.SetValueAndNotifyAll ( false );
}
void ReplicationCluster_t::AbortSST()
{
if ( GetState() != ClusterState_e::JOINING )
return;
assert ( m_pProvider );
sphLogDebugRpl ( "aborting SST" );
Wsrep::GlobalTid_t tGtid {};
m_pProvider->SstReceived ( tGtid, -ECANCELED );
}
// shutdown and delete cluster, also join cluster recv thread
void ReplicationCluster_t::DisconnectAndDeleteProvider()
{
if ( !m_pProvider )
return;
AbortSST ();
sphLogDebugRpl ( "disconnecting from cluster %s", m_sName.cstr() );
m_pProvider->Disconnect();
sphLogDebugRpl ( "disconnected from cluster %s", m_sName.cstr() );
}
ReplicationCluster_t::~ReplicationCluster_t()
{
sphLogDebugRpl ( "cluster '%s' wait to finish", m_sName.scstr() );
	// The listening thread is now running and receiving writesets. Wait for it
	// to join. It will join after the signal handler closes the wsrep connection
m_bWorkerActive.Wait ( [] ( bool bWorking ) { return !bWorking; } );
HeartBeat();
sphLogDebugRpl ( "deleting provider of cluster %s", m_sName.cstr() );
SafeDelete ( m_pProvider );
sphLogDebugRpl ( "cluster '%s' finished, cluster deleted", m_sName.scstr() );
}
// add 'RPL' flag - i.e., that we're working in replication
struct RPLRep_t: public MiniTaskInfo_t
{
DECLARE_RENDER ( RPLRep_t );
};
DEFINE_RENDER ( RPLRep_t )
{
auto& tInfo = *(RPLRep_t*)const_cast<void*> ( pSrc );
dDst.m_sDescription.Sprintf ( "(RPL %.2T)", tInfo.m_tmStart );
dDst.m_sChain << "RPL ";
}
// repl version
// replicate serialized data into the cluster and invoke the commit monitor along the way
static bool Replicate ( const VecTraits_T<uint64_t>& dKeys, const VecTraits_T<BYTE>& tQueries, Wsrep::Writeset_i& tWriteSet, CommitMonitor_c&& tMonitor, bool bUpdate, bool bSharedKeys )
{
TRACE_CONN ( "conn", "Replicate" );
// just displays 'RPL' flag.
auto RPL = PublishTaskInfo ( new RPLRep_t );
bool bOk = tWriteSet.AppendKeys ( dKeys, bSharedKeys ) && tWriteSet.AppendData ( tQueries );
if ( !bOk )
return false;
auto dFinalReport = AtScopeExit ( [&tWriteSet] {
sphLogDebugRpl ( "%s seq " INT64_FMT, ( tWriteSet.LastOk() ? "committed" : "rolled-back" ), tWriteSet.LastSeqno() );
} );
TRACE_CONN ( "conn", "pProvider->replicate" );
if ( !tWriteSet.Replicate() )
{
RPL_TNX << "replicating " << (int)bOk << ", seq " << tWriteSet.LastSeqno() << ", commands " << dKeys.GetLength();
return false;
}
if ( !tWriteSet.PreCommit() )
return false;
	// in case only the local commit failed,
	// we need to abort the running transaction prior to rollback
if ( !bUpdate )
bOk = tMonitor.Commit ();
else
bOk = tMonitor.UpdateTOI ();
if ( !bOk )
{
tWriteSet.AbortPreCommit();
return false;
}
tWriteSet.InterimCommit();
tWriteSet.PostCommit();
return true;
}
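// The happy path of the commit protocol above, as a minimal sketch:
//   AppendKeys/AppendData -> Replicate() -> PreCommit()
//     -> local Commit() or UpdateTOI() via the commit monitor
//     -> InterimCommit() -> PostCommit()
// and if the local commit fails after PreCommit(), AbortPreCommit() rolls the
// writeset back instead.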
// replicate serialized data into the cluster in TotalOrderIsolation mode and invoke the commit monitor along the way
static bool ReplicateTOI ( const VecTraits_T<uint64_t> & dKeys, const VecTraits_T<BYTE> & tQueries, Wsrep::Writeset_i & tWriteSet, CommitMonitor_c && tMonitor )
{
bool bOk = tWriteSet.ToExecuteStart ( dKeys, tQueries );
sphLogDebugRpl ( "replicating TOI %d, seq " INT64_FMT, (int)bOk, tWriteSet.LastSeqno() );
if ( !bOk )
return false;
// FIXME!!! can not fail TOI transaction
tMonitor.CommitTOI();
tWriteSet.ToExecuteEnd();
sphLogDebugRpl ( "%s seq " INT64_FMT, tWriteSet.LastOk() ? "commited" : "rolled-back", tWriteSet.LastSeqno() );
return tWriteSet.LastOk();
}
// get cluster status variables (our and Galera)
void ReplicationCluster_t::ShowStatus ( VectorLike& dOut )
{
if ( !m_pProvider )
return;
assert ( m_eStatus >= Wsrep::ViewStatus_e::PRIMARY && m_eStatus < Wsrep::ViewStatus_e::MAX );
const char* sName = m_sName.cstr();
// cluster vars
if ( dOut.MatchAdd ( "cluster_name" ) )
dOut.Add ( m_sName );
if ( dOut.MatchAddf ( "cluster_%s_state_uuid", sName ) )
dOut.Add ( Wsrep::Uuid2Str ( m_dUUID ) );
if ( dOut.MatchAddf ( "cluster_%s_conf_id", sName ) )
dOut.Add().SetSprintf ( INT64_FMT, m_iConfID );
if ( dOut.MatchAddf ( "cluster_%s_status", sName ) )
dOut.Add() = Wsrep::GetViewStatus ( m_eStatus );
if ( dOut.MatchAddf ( "cluster_%s_size", sName ) )
dOut.Add().SetSprintf ( "%d", m_iSize );
if ( dOut.MatchAddf ( "cluster_%s_local_index", sName ) )
dOut.Add().SetSprintf ( "%d", m_iIdx );
if ( dOut.MatchAddf ( "cluster_%s_node_state", sName ) )
dOut.Add ( szState() );
// nodes of cluster defined and view
if ( dOut.MatchAddf ( "cluster_%s_nodes_set", sName ) )
dOut.Add ( StrVec2Str ( m_dClusterNodes ).cstr() );
if ( dOut.MatchAddf ( "cluster_%s_nodes_view", sName ) )
{
StringBuilder_c sNodes ( "," );
FilterViewNodes ( [&sNodes] ( const CSphString& sNode ) { sNodes << sNode; } );
dOut.Add ( sNodes.cstr() );
}
// cluster indexes
if ( dOut.MatchAddf ( "cluster_%s_indexes_count", sName ) )
WithRlockedIndexes ( [&dOut] ( const auto& hIndexes ) { dOut.Addf ( "%d", hIndexes.GetLength() ); } );
if ( dOut.MatchAddf ( "cluster_%s_indexes", sName ) )
{
StringBuilder_c tBuf ( "," );
WithRlockedIndexes ( [&tBuf] ( const auto& hIndexes ) { for_each ( hIndexes, [&] ( const auto& tIndex ) { tBuf << tIndex.first; } ); } );
dOut.Add ( tBuf.cstr() );
}
// show last cluster error if any
if ( dOut.Matchf ( "cluster_%s_last_error", sName ) )
{
ScopedMutex_t tErrorLock ( m_tErrorLock );
if ( !m_sError.IsEmpty() ) {
dOut.Addf ( "cluster_%s_last_error", sName );
dOut.Add ( m_sError.cstr() );
}
}
// cluster status
m_pProvider->EnumStatsVars ( [&dOut,&sName] ( const Wsrep::StatsVars_t& tVar ) {
if ( !dOut.MatchAddf ( "cluster_%s_%s", sName, tVar.m_szName ) )
return;
switch ( tVar.m_eType )
{
case Wsrep::StatsVars_t::STRING:
dOut.Add ( tVar.m_tValue.szString );
return;
case Wsrep::StatsVars_t::DOUBLE:
dOut.Addf ( "%f", tVar.m_tValue.fDouble );
return;
case Wsrep::StatsVars_t::INT64:
dOut.Addf ( "%l", tVar.m_tValue.iVal64 );
return;
default:
assert ( 0 && "Internal error" );
}
} );
}
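// Illustrative sample of the emitted variables for a cluster named 'posts'
// (values made up): cluster_name=posts, cluster_posts_status=primary,
// cluster_posts_size=3, cluster_posts_node_state=synced, plus every raw Galera
// counter exposed as cluster_posts_<galera_var>.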
// set Galera option for cluster
bool ReplicateSetOption ( const CSphString& sCluster, const CSphString& sName, const CSphString& sVal )
{
auto pCluster = ClusterByName ( sCluster, "unknown cluster '%s' in SET statement" );
if ( !pCluster )
return false;
if ( !pCluster->m_pProvider->OptionsSet ( sName, sVal ) )
return false;
pCluster->WithWlockedOptions ( [&] ( ClusterOptions_t& hOptions ) { hOptions.m_hOptions.Add ( sVal, sName ); } );
return true;
}
// delete all clusters on daemon shutdown
void ReplicationServiceShutdown() EXCLUDES ( g_tClustersLock )
{
Threads::CallCoroutine ( [] {
Threads::SccWL_t wLock ( g_tClustersLock );
if ( g_hClusters.IsEmpty() )
return;
sphLogDebugRpl ( "clusters (%d) delete invoked", g_hClusters.GetLength() );
for ( auto & tCluster : g_hClusters )
{
sphLogDebugRpl ( "cluster '%s' delete", tCluster.first.cstr() );
tCluster.second->DisconnectAndDeleteProvider();
}
sphLogDebugRpl ( "clusters delete done" );
g_hClusters.Reset();
} );
}
// collect clusters and their indexes
CSphVector<ClusterDesc_t> ReplicationCollectClusters () EXCLUDES ( g_tClustersLock )
{
CSphVector<ClusterDesc_t> dClusters;
if ( !ReplicationEnabled() )
return dClusters;
Threads::SccRL_t tLock ( g_tClustersLock );
for ( const auto& tCluster : g_hClusters )
{
// should save all clusters on start
// but skip cluster that just joining from user request
if ( tCluster.second->GetState() != ClusterState_e::JOINING || !tCluster.second->m_bUserRequest )
dClusters.Add ( *tCluster.second );
}
return dClusters;
}
// dump all clusters statuses
void ReplicateClustersStatus ( VectorLike & dStatus ) EXCLUDES ( g_tClustersLock )
{
Threads::SccRL_t tLock ( g_tClustersLock );
for_each ( g_hClusters, [&dStatus] (const auto& tCluster) { tCluster.second->ShowStatus ( dStatus ); });
}
// check whether an index with the given name exists and is mutable (pq or rt) or distributed
static bool CheckIndexExists ( const CSphString & sIndex )
{
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
bool bMutable = ServedDesc_t::IsMutable ( pServed );
cDistributedIndexRefPtr_t pDist ( !bMutable ? GetDistr ( sIndex ) : nullptr );
if ( !bMutable && !pDist )
return TlsMsg::Err ( "unknown table '%s'", sIndex.cstr() );
if ( ServedDesc_t::IsMutable ( pServed ) || pDist )
return true;
else
return TlsMsg::Err ( "wrong type of table '%s'", sIndex.cstr() );
}
static StrVec_t SplitIndexes ( const CSphString & sIndexes )
{
const char * sIndexesNameDel = ",` ";
StrVec_t dRes;
sphSplitApply ( sIndexes.cstr(), sIndexes.Length(), sIndexesNameDel, [&dRes] ( const char * sTok, int iLen )
{
if ( !iLen )
return;
dRes.Add().SetBinary ( sTok, iLen );
});
return dRes;
}
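// Illustrative example (not in the original source): commas, backticks and
// spaces all act as separators, so
//   SplitIndexes ( "idx1, `idx2` idx3" ) yields { "idx1", "idx2", "idx3" }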
static bool CheckIndexesExists ( const CSphString & sIndex )
{
StrVec_t dIndexes = SplitIndexes ( sIndex );
for ( const CSphString & sIndex : dIndexes )
{
if ( !CheckIndexExists ( sIndex ) )
return false;
}
return true;
}
// set cluster name into index desc for fast rejects
bool AssignClusterToIndex ( const CSphString & sIndex, const CSphString & sCluster )
{
TlsMsg::ResetErr();
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
bool bMutable = ServedDesc_t::IsMutable ( pServed );
cDistributedIndexRefPtr_t pDist ( !bMutable ? GetDistr ( sIndex ) : nullptr );
if ( !bMutable && !pDist )
return TlsMsg::Err ( "unknown table, or wrong type of table '%s'", sIndex.cstr() );
if ( bMutable )
{
if ( pServed->m_sCluster==sCluster )
return true;
ServedIndexRefPtr_c pClone = MakeFullClone ( pServed );
pClone->m_sCluster = sCluster;
g_pLocalIndexes->Replace ( pClone, sIndex );
} else
{
if ( pDist->m_sCluster==sCluster )
return true;
DistributedIndexRefPtr_t pNewDist ( pDist->Clone() );
pNewDist->m_sCluster = sCluster;
g_pDistIndexes->Replace ( pNewDist, sIndex );
}
return true;
}
bool AssignClusterToIndexes ( const VecTraits_T<CSphString> & dIndexes, const CSphString & sCluster )
{
for ( const CSphString & sIndex : dIndexes )
{
if ( !AssignClusterToIndex ( sIndex, sCluster ) )
return false;
}
return true;
}
// re-enable write operations (saving) to the disk chunks of an index
static bool EnableIndexWrite ( const CSphString & sIndex )
{
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
bool bMutable = ServedDesc_t::IsMutable ( pServed );
cDistributedIndexRefPtr_t pDist ( !bMutable ? GetDistr ( sIndex ) : nullptr );
if ( !bMutable && !pDist )
return TlsMsg::Err ( "unknown or wrong-typed table '%s'", sIndex.cstr() );
if ( pDist )
return true;
RIdx_T<RtIndex_i*> ( pServed )->EnableSave();
return true;
}
// handle replicated command (came from outside)
bool HandleCmdReplicated ( RtAccum_t & tAcc )
{
TRACE_SCHED ( "conn", "HandleCmdReplicated" );
TlsMsg::ResetErr();
if ( tAcc.m_dCmd.IsEmpty() )
return TlsMsg::Err ( "empty accumulator" );
const ReplicationCommand_t & tCmd = *tAcc.m_dCmd[0];
bool bCmdCluster = ( tAcc.m_dCmd.GetLength()==1
&& ( tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD || tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_DROP ) );
if ( bCmdCluster )
{
if ( tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD && !CheckIndexesExists ( tCmd.m_sIndex ) )
return TlsMsg::Err ( "replication error: %s, command %d, cluster %s", TlsMsg::szError(), (int)tCmd.m_eCommand, tCmd.m_sCluster.cstr() );
CommitMonitor_c tCommit ( tAcc );
return tCommit.CommitTOI();
}
if ( tAcc.m_dCmd.GetLength()==1 &&
( tCmd.m_eCommand== ReplCmd_e::UPDATE_API
|| tCmd.m_eCommand== ReplCmd_e::UPDATE_QL
|| tCmd.m_eCommand== ReplCmd_e::UPDATE_JSON ) )
{
int iUpd = -1;
CSphString sWarning;
CommitMonitor_c tCommit ( tAcc, &sWarning, &iUpd );
return tCommit.UpdateTOI() && ( sWarning.IsEmpty() || TlsMsg::Err ( "%s", sWarning.cstr() ) );
// FIXME!!! make update trx
}
cServedIndexRefPtr_c pServed = GetServed ( tCmd.m_sIndex );
if ( !pServed || !ServedDesc_t::IsMutable ( pServed ) )
return TlsMsg::Err ( "wrong type of table '%s' for replication, command %d", tCmd.m_sIndex.cstr(), (int)tCmd.m_eCommand );
CSphString sError;
// special path with wlocked index for truncate
if ( tCmd.m_eCommand == ReplCmd_e::TRUNCATE )
{
RPL_TNX << "truncate-commit, table '" << tCmd.m_sIndex.cstr() << "'";
return WIdx_T<RtIndex_i*> ( pServed )->Truncate ( sError, RtIndex_i::TRUNCATE )
|| TlsMsg::Err ( "%s", sError.cstr() );
}
assert ( tCmd.m_eCommand != ReplCmd_e::TRUNCATE );
RPL_TNX << "commit, table '" << tCmd.m_sIndex.cstr() << "', uid " << ( tCmd.m_pStored ? tCmd.m_pStored->m_iQUID : int64_t(0) ) << ", queries " << tCmd.m_dDeleteQueries.GetLength() << ", tags " << tCmd.m_sDeleteTags.scstr();
RIdx_T<RtIndex_i*> pIndex { pServed };
if ( !tAcc.SetupDocstore ( *pIndex, sError ) )
{
sphWarning ( "%s, table '%s', command %d", sError.cstr(), tCmd.m_sIndex.cstr(), (int)tCmd.m_eCommand );
return false;
}
return pIndex->Commit ( nullptr, &tAcc );
}
// single point through which all commands that might be replicated to a cluster pass
static bool HandleRealCmdReplicate ( RtAccum_t & tAcc, CommitMonitor_c && tMonitor ) EXCLUDES ( g_tClustersLock )
{
TRACE_CONN ( "conn", "HandleCmdReplicate" );
// FIXME!!! for now only PQ add and PQ delete multiple commands supported
const ReplicationCommand_t & tCmdCluster = *tAcc.m_dCmd[0];
auto pCluster = ClusterByName ( tCmdCluster.m_sCluster, nullptr );
if ( !pCluster )
{
if ( g_bReplicationStarted )
return TlsMsg::Err ( "unknown cluster '%s'", tCmdCluster.m_sCluster.cstr() );
return TlsMsg::Err ( "cluster '%s' can not replicate: %s", tCmdCluster.m_sCluster.cstr(), g_sReplicationStartError.cstr() );
}
if ( !pCluster->IsHealthy() )
return false;
if ( tCmdCluster.m_eCommand==ReplCmd_e::TRUNCATE && tCmdCluster.m_tReconfigure )
return TlsMsg::Err ( "RECONFIGURE is not supported for a cluster table" );
if ( tCmdCluster.m_bCheckIndex )
{
if ( tCmdCluster.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD || tCmdCluster.m_eCommand==ReplCmd_e::CLUSTER_ALTER_DROP )
{
StrVec_t dIndexes = SplitIndexes ( tCmdCluster.m_sIndex );
if ( !CheckClusterIndexes ( dIndexes, pCluster ) )
return false;
} else if ( !CheckClusterIndex ( tCmdCluster.m_sIndex, pCluster ) )
return false;
}
bool bUpdate = ( tCmdCluster.m_eCommand==ReplCmd_e::UPDATE_API
|| tCmdCluster.m_eCommand==ReplCmd_e::UPDATE_QL
|| tCmdCluster.m_eCommand==ReplCmd_e::UPDATE_JSON );
if ( bUpdate && !ValidateUpdate ( tCmdCluster ) )
return false;
assert ( pCluster->m_pProvider );
bool bTOI = false;
	// the replicator checks the CRC of the data - no need to check it on our side
int iKeysCount = tAcc.m_dCmd.GetLength() + tAcc.m_dAccumKlist.GetLength();
CSphVector<BYTE> dBufQueries;
CSphVector<uint64_t> dBufKeys;
dBufKeys.Reserve ( iKeysCount );
BEGIN_CONN ( "conn", "HandleCmdReplicate.serialize", "commands", tAcc.m_dCmd.GetLength() );
for ( auto& pCmd : tAcc.m_dCmd )
{
const ReplicationCommand_t & tCmd = *pCmd;
uint64_t uQueryHash = sphFNV64 ( tCmdCluster.m_sIndex.cstr() );
uQueryHash = sphFNV64 ( &tCmd.m_eCommand, sizeof(tCmd.m_eCommand), uQueryHash );
MemoryWriter_c tWriter ( dBufQueries );
SaveCmdHeader ( tCmd, tWriter );
auto iLenPos = dBufQueries.GetLength();
dBufQueries.AddN ( sizeof(DWORD) );
int iLenOff = dBufQueries.GetLength();
switch ( tCmd.m_eCommand )
{
case ReplCmd_e::PQUERY_ADD:
assert ( tCmd.m_pStored );
SaveStoredQuery ( *tCmd.m_pStored, tWriter );
uQueryHash = sphFNV64 ( &tCmd.m_pStored->m_iQUID, sizeof(tCmd.m_pStored->m_iQUID), uQueryHash );
break;
case ReplCmd_e::PQUERY_DELETE:
assert ( tCmd.m_dDeleteQueries.GetLength() || !tCmd.m_sDeleteTags.IsEmpty() );
SaveDeleteQuery ( tCmd.m_dDeleteQueries, tCmd.m_sDeleteTags.cstr(), tWriter );
for ( const auto& dQuery : tCmd.m_dDeleteQueries )
uQueryHash = sphFNV64 ( &dQuery, sizeof ( dQuery ), uQueryHash );
uQueryHash = sphFNV64cont ( tCmd.m_sDeleteTags.cstr(), uQueryHash );
break;
case ReplCmd_e::TRUNCATE:
// FIXME!!! add reconfigure option here
break;
case ReplCmd_e::CLUSTER_ALTER_ADD:
case ReplCmd_e::CLUSTER_ALTER_DROP:
bTOI = true;
break;
case ReplCmd_e::RT_TRX:
{
auto iStartPos = dBufQueries.GetLengthBytes();
tAcc.SaveRtTrx ( tWriter );
uQueryHash = sphFNV64cont( { dBufQueries.begin() + iStartPos, (int64_t)(dBufQueries.GetLengthBytes() - iStartPos) }, uQueryHash);
}
break;
case ReplCmd_e::UPDATE_API:
{
assert ( tCmd.m_pUpdateAPI );
const CSphAttrUpdate * pUpd = tCmd.m_pUpdateAPI;
uQueryHash = sphFNV64 ( pUpd->m_dDocids.Begin(), (int) pUpd->m_dDocids.GetLengthBytes(), uQueryHash );
uQueryHash = sphFNV64 ( pUpd->m_dPool.Begin(), (int) pUpd->m_dPool.GetLengthBytes(), uQueryHash );
SaveAttrUpdate ( *pUpd, tWriter );
}
break;
case ReplCmd_e::UPDATE_QL:
case ReplCmd_e::UPDATE_JSON:
{
assert ( tCmd.m_pUpdateAPI );
assert ( tCmd.m_pUpdateCond );
const CSphAttrUpdate * pUpd = tCmd.m_pUpdateAPI;
uQueryHash = sphFNV64 ( pUpd->m_dDocids.Begin(), (int) pUpd->m_dDocids.GetLengthBytes(), uQueryHash );
uQueryHash = sphFNV64 ( pUpd->m_dPool.Begin(), (int) pUpd->m_dPool.GetLengthBytes(), uQueryHash );
SaveAttrUpdate ( *pUpd, tWriter );
SaveUpdate ( *tCmd.m_pUpdateCond, tWriter );
}
break;
default:
return TlsMsg::Err ( "unknown command '%d'", (int)tCmd.m_eCommand );
}
// store query hash as key
dBufKeys.Add (uQueryHash);
if ( tCmd.m_eCommand == ReplCmd_e::RT_TRX )
dBufKeys.Append ( tAcc.m_dAccumKlist );
// store request length
sphUnalignedWrite ( &dBufQueries[iLenPos], dBufQueries.GetLength() - iLenOff );
}
END_CONN ( "conn" );
auto pWriteSet = pCluster->m_pProvider->MakeWriteSet();
BEGIN_CONN ( "conn", "HandleCmdReplicate.cluster_lock" );
Threads::ScopedCoroMutex_t tClusterLock { pCluster->m_tReplicationMutex };
END_CONN ( "conn" );
if ( bTOI )
return ReplicateTOI ( dBufKeys, dBufQueries, *pWriteSet, std::move ( tMonitor ) );
return Replicate ( dBufKeys, dBufQueries, *pWriteSet, std::move ( tMonitor ), bUpdate, tAcc.IsReplace () );
}
// single point through which all commands pass that might be replicated, even when there is no cluster
static bool HandleCmdReplicateImpl ( RtAccum_t & tAcc, int * pDeletedCount, CSphString * pWarning, int * pUpdated ) EXCLUDES ( g_tClustersLock )
{
TRACE_CORO ( "sph", "HandleCmdReplicateImpl" );
CommitMonitor_c tMonitor ( tAcc, pDeletedCount, pWarning, pUpdated );
// with cluster path
if ( tAcc.IsClusterCommand () )
return HandleRealCmdReplicate ( tAcc, std::move ( tMonitor ) );
if ( tAcc.IsUpdateCommand () )
return tMonitor.UpdateTOI ();
return tMonitor.Commit ();
}
bool HandleCmdReplicate ( RtAccum_t & tAcc )
{
return HandleCmdReplicateImpl ( tAcc, nullptr, nullptr, nullptr );
}
bool HandleCmdReplicateDelete ( RtAccum_t & tAcc, int & iDeletedCount )
{
return HandleCmdReplicateImpl ( tAcc, &iDeletedCount, nullptr, nullptr );
}
bool HandleCmdReplicateUpdate ( RtAccum_t & tAcc, CSphString & sWarning, int & iUpdated )
{
return HandleCmdReplicateImpl ( tAcc, nullptr, &sWarning, &iUpdated );
}
bool SetIndexesClusterTOI ( const ReplicationCommand_t * pCmd )
{
assert ( pCmd );
const ReplicationCommand_t& tCmd = *pCmd;
assert ( tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_DROP || tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD );
auto pCluster = ClusterByName ( tCmd.m_sCluster );
if ( !pCluster )
return false;
StrVec_t dIndexes = SplitIndexes ( tCmd.m_sIndex );
sphLogDebugRpl ( "SetIndexesClusterTOI '%s' for cluster '%s': indexes '%s' > '%s'", ( tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD ? "add" : "drop" ), pCluster->m_sName.cstr(), tCmd.m_sIndex.cstr(), StrVec2Str ( pCluster->GetIndexes() ).cstr() );
if ( tCmd.m_bCheckIndex && !CheckClusterIndexes ( dIndexes, pCluster ) )
return false;
if ( tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD )
{
if ( !AssignClusterToIndexes ( dIndexes, tCmd.m_sCluster ) )
return false;
pCluster->WithWlockedIndexes ( [&] ( auto & hIndexes, auto & hIndexesLoaded )
{
for ( const CSphString & sIndex : dIndexes )
{
hIndexes.Add ( sIndex );
hIndexesLoaded.Delete ( sIndex );
}
});
} else
{
if ( !AssignClusterToIndexes ( dIndexes, "" ) )
return false;
pCluster->WithWlockedIndexes ( [&] ( auto & hIndexes, auto & hIndexesLoaded )
{
for ( const CSphString & sIndex : dIndexes )
hIndexes.Delete ( sIndex );
});
}
TLS_MSG_STRING ( sError );
bool bSaved = SaveConfigInt ( sError );
sphLogDebugRpl ( "SetIndexesClusterTOI finished '%s' for cluster '%s': indexes '%s' > '%s', error: %s", ( tCmd.m_eCommand==ReplCmd_e::CLUSTER_ALTER_ADD ? "add" : "drop" ), pCluster->m_sName.cstr(), tCmd.m_sIndex.cstr(), StrVec2Str ( pCluster->GetIndexes() ).cstr(), sError.scstr() );
return bSaved;
}
static bool ValidateUpdate ( const ReplicationCommand_t & tCmd )
{
cServedIndexRefPtr_c pServed = GetServed ( tCmd.m_sIndex );
if ( !pServed )
return TlsMsg::Err ( "requires an existing table, %s", tCmd.m_sIndex.cstr() );
const ISphSchema& tSchema = RIdx_c ( pServed )->GetMatchSchema();
assert ( tCmd.m_pUpdateAPI );
CSphString sError;
return Update_CheckAttributes ( *tCmd.m_pUpdateAPI, tSchema, sError ) || TlsMsg::Err ( sError );
}
// load indexes received from another node, or ones that already existed, into the daemon
static bool ReplicatedIndexes ( const VecTraits_T<CSphString> & dIndexes, const CSphString & sCluster ) EXCLUDES ( g_tClustersLock )
{
assert ( ReplicationEnabled() );
if ( !dIndexes.all_of ( [] ( const CSphString & sIndex ) { return CheckIndexExists ( sIndex ); } ) )
return false;
auto pCluster = ClusterByName( sCluster );
if ( !pCluster )
return false;
sph::StringSet hIndexes ( dIndexes );
// scope for check of cluster data
{
Threads::SccRL_t rLock( g_tClustersLock );
// indexes should be new or from same cluster
for ( const auto& tCluster : g_hClusters )
{
const ReplicationCluster_t * pOrigCluster = tCluster.second;
if ( pOrigCluster==pCluster.CPtr() )
continue;
bool bHasCluster = pOrigCluster->WithRlockedIndexes([&hIndexes,pOrigCluster]( const auto & hOrigIndexes )
{
for ( const auto & tIndex : hOrigIndexes )
{
if ( hIndexes[tIndex.first] )
return TlsMsg::Err ( "table '%s' is already a part of cluster '%s'", tIndex.first.cstr(), pOrigCluster->m_sName.cstr() );
}
return true;
});
if ( !bHasCluster )
return false;
}
}
bool bOk = AssignClusterToIndexes ( dIndexes, sCluster );
	// need to re-enable local index writes
for ( const CSphString & sIndex : dIndexes )
bOk &= EnableIndexWrite ( sIndex );
if ( !bOk )
return false;
pCluster->WithWlockedIndexes([&dIndexes] ( auto & hIndexes, auto & hIndexesLoaded )
{
hIndexes.Reset();
dIndexes.for_each ( [&hIndexes, &hIndexesLoaded] ( const auto & sIndex )
{
hIndexes.Add ( sIndex );
hIndexesLoaded.Delete ( sIndex );
});
});
TLS_MSG_STRING ( sError );
return SaveConfigInt ( sError );
}
// build the full cluster path by joining the data dir with the cluster's relative path
std::optional<CSphString> GetClusterPath ( const CSphString & sCluster ) EXCLUDES ( g_tClustersLock )
{
std::optional<CSphString> tRes;
auto pCluster = ClusterByName ( sCluster );
if ( pCluster )
tRes = GetDatadirPath ( pCluster->m_sPath );
return tRes;
}
// validate cluster paths
static bool ClusterCheckPath ( const CSphString& sPath, const char* szCluster, bool bNeedWrite = false ) EXCLUDES ( g_tClustersLock )
{
if ( !ReplicationEnabled() )
return TlsMsg::Err ( "data_dir option is missing in config or no replication listener is set, replication is disabled" );
auto sFullPath = GetDatadirPath ( sPath );
CSphString sError;
if ( !CheckPath ( sFullPath, bNeedWrite, sError ) )
return TlsMsg::Err ( "cluster '%s', %s", szCluster, sError.cstr() );
Threads::SccRL_t tClusterRLock ( g_tClustersLock );
for ( const auto& tCluster : g_hClusters )
{
if ( sPath == tCluster.second->m_sPath )
return TlsMsg::Err ( "duplicate paths, cluster '%s' has the same path as '%s'", szCluster, tCluster.second->m_sName.cstr() );
}
return true;
}
static ReplicationCluster_t* MakeClusterOffline ( ClusterDesc_t tDesc )
{
auto tPort = PortRange::AcquirePort();
if ( !tPort.IsValid() )
{
TlsMsg::Err ( "cluster '%s', no replication ports available, add replication listener", tDesc.m_sName.cstr() );
return nullptr;
}
CSphRefcountedPtr<ReplicationCluster_t> pCluster ( new ReplicationCluster_t ( std::move ( tDesc ) ) );
pCluster->m_tPort = std::move ( tPort );
return pCluster.Leak();
}
static bool PrepareNodesAndInitCluster ( ReplicationCluster_t& tCluster, BOOTSTRAP_E eBootStrap )
{
sphLogDebugRpl ( "PrepareNodesAndInitCluster '%s', bootstrap %d, nodes: %d", tCluster.m_sName.cstr(), (int)eBootStrap, tCluster.m_dClusterNodes.GetLength() );
if ( tCluster.m_dClusterNodes.IsEmpty() && eBootStrap!=BOOTSTRAP_E::YES )
{
sphWarning ( "no nodes found, created new cluster '%s'", tCluster.m_sName.cstr() );
eBootStrap = BOOTSTRAP_E::YES;
}
StrVec_t dNodes;
if ( eBootStrap == BOOTSTRAP_E::NO )
{
dNodes = GetNodeListFromRemotes ( tCluster );
if ( dNodes.IsEmpty() )
return false;
}
if ( !tCluster.Init () )
return false;
tCluster.SetViewNodes ( std::move ( dNodes ) );
tCluster.m_dClusterNodes = tCluster.FilterViewNodesByProto ();
return true;
}
static ReplicationCluster_t* MakeCluster ( ClusterDesc_t tDesc, BOOTSTRAP_E eBootStrap )
{
CSphRefcountedPtr<ReplicationCluster_t> pCluster ( MakeClusterOffline ( std::move ( tDesc ) ) );
if ( !pCluster )
return nullptr;
if ( !PrepareNodesAndInitCluster ( *pCluster, eBootStrap) )
return nullptr;
return pCluster.Leak();
}
static bool AddAndStartCluster ( ReplicationClusterRefPtr_c pCluster )
{
{
Threads::SccWL_t tLock ( g_tClustersLock );
if ( !g_hClusters.Add ( pCluster, pCluster->m_sName ) )
return false;
}
pCluster->StartListen();
return true;
}
static bool ClusterDescOk ( const ClusterDesc_t& tDesc, bool bForce ) noexcept
{
if ( tDesc.m_dClusterNodes.IsEmpty() )
sphWarning ( "no nodes found, create new cluster '%s'", tDesc.m_sName.cstr() );
// check cluster path is unique
if ( !ClusterCheckPath ( tDesc.m_sPath, tDesc.m_sName.cstr() ) )
{
sphWarning ( "Cluster %s: %s, skipped", tDesc.m_sName.cstr(), TlsMsg::szError() );
return false;
}
if ( !bForce )
return true;
auto sDataDirPath = GetDatadirPath ( tDesc.m_sPath );
if ( !NewClusterForce ( sDataDirPath ) || !CheckClusterNew ( sDataDirPath ) )
{
sphWarning ( "Cluster %s: %s, skipped", tDesc.m_sName.cstr(), TlsMsg::szError() );
return false;
}
return true;
}
// called from ReplicationServiceStart - see below
static void CoReplicationServiceStart ( bool bBootStrap ) EXCLUDES ( g_tClustersLock )
{
const auto eBootStrap = (BOOTSTRAP_E)bBootStrap;
assert ( Threads::IsInsideCoroutine() );
StrVec_t dFailedClustersToRemove;
auto fnRemoveFailedCluster = [&dFailedClustersToRemove] ( std::pair<CSphString, CSphRefcountedPtr<ReplicationCluster_t>>& hCluster ) {
sphWarning ( "%s", TlsMsg::szError() );
dFailedClustersToRemove.Add ( hCluster.first );
auto& pCluster = hCluster.second;
pCluster->WithRlockedIndexes ( [] ( const auto& hIndexes ) { for_each ( hIndexes, [] ( const auto& tIndex ) { AssignClusterToIndex ( tIndex.first, "" ); } ); } );
};
Threads::SccWL_t tLock ( g_tClustersLock );
for ( auto& hCluster : g_hClusters )
{
TlsMsg::ResetErr();
auto& sName = hCluster.first;
auto& pCluster = hCluster.second;
if ( !PrepareNodesAndInitCluster ( *pCluster, eBootStrap ) )
{
fnRemoveFailedCluster ( hCluster );
continue;
}
		// connect the cluster to its peers
if ( !pCluster->Connect ( eBootStrap ) )
{
fnRemoveFailedCluster ( hCluster );
continue;
}
pCluster->StartListen();
sphLogDebugRpl ( "'%s' cluster started with %d tables", sName.cstr(), pCluster->WithRlockedIndexes ( [] ( const auto& hIndexes ) { return hIndexes.GetLength(); } ) );
}
if ( !g_hClusters.IsEmpty() && dFailedClustersToRemove.GetLength()==g_hClusters.GetLength() )
sphWarning ( "no clusters to start" );
for ( const auto& sCluster : dFailedClustersToRemove )
g_hClusters.Delete ( sCluster );
g_bReplicationStarted = true;
}
// start clusters on daemon start
void ReplicationServiceStart ( bool bBootStrap ) EXCLUDES ( g_tClustersLock )
{
// should be lined up with PrepareClustersOnStartup
if ( !ReplicationEnabled() )
{
if ( !g_hClusters.IsEmpty() )
sphWarning ( "loading %d cluster(s) but the replication disabled, %s", g_hClusters.GetLength(), g_sReplicationStartError.cstr() );
return;
}
Threads::CallCoroutine ( [=]() EXCLUDES ( g_tClustersLock ) { CoReplicationServiceStart ( bBootStrap ); } );
}
// called from PrepareClustersOnStartup - see below
static void CoPrepareClustersOnStartup ( bool bForce ) EXCLUDES ( g_tClustersLock )
{
SmallStringHash_T<ReplicationClusterRefPtr_c> hClusters;
assert ( Threads::IsInsideCoroutine() );
for ( const ClusterDesc_t& tDesc : GetClustersInt() )
{
if ( !ClusterDescOk ( tDesc, bForce ) )
continue;
if ( !CheckRemotesVersions ( tDesc ) )
{
sphWarning ( "%s", TlsMsg::szError() );
continue;
}
CSphRefcountedPtr<ReplicationCluster_t> pNewCluster { MakeClusterOffline ( tDesc ) };
if ( !pNewCluster ) {
sphWarning ( "%s", TlsMsg::szError() );
continue;
}
// check indexes valid
for ( const auto & tIndex : tDesc.m_hIndexes )
{
const CSphString & sIndex = tIndex.first;
if ( !AssignClusterToIndex ( sIndex, pNewCluster->m_sName ) )
{
sphWarning ( "%s, removed from cluster '%s'", TlsMsg::szError(), pNewCluster->m_sName.cstr() );
continue;
}
pNewCluster->WithWlockedIndexes ( [&sIndex] ( auto & hIndexes, auto & hIndexesLoaded )
{
hIndexes.Add ( sIndex );
hIndexesLoaded.Delete ( sIndex );
});
}
if ( !hClusters.Add ( pNewCluster, pNewCluster->m_sName ) )
{
for ( const auto & tIndex : tDesc.m_hIndexes )
{
if ( !AssignClusterToIndex ( tIndex.first, "" ) )
sphWarning ( "%s on removal table '%s' from a cluster", TlsMsg::szError(), tIndex.first.cstr() );
}
continue;
}
}
// copy prepared clusters
Threads::SccWL_t tLock ( g_tClustersLock );
assert ( g_hClusters.IsEmpty() );
for_each ( hClusters, [&] ( auto& hCluster ) REQUIRES ( g_tClustersLock ) { g_hClusters.Add ( hCluster.second, hCluster.first ); } );
}
void PrepareClustersOnStartup ( const VecTraits_T<ListenerDesc_t>& dListeners, bool bForce ) EXCLUDES ( g_tClustersLock )
{
if ( !SetReplicationListener ( dListeners, g_sReplicationStartError ) )
{
if ( !GetClustersInt().IsEmpty() )
sphWarning ( "%s", g_sReplicationStartError.cstr() );
else
sphLogDebugRpl ( "%s", g_sReplicationStartError.cstr() );
return;
}
g_bReplicationEnabled = true;
Threads::CallCoroutine ( [=]() EXCLUDES ( g_tClustersLock ) { CoPrepareClustersOnStartup ( bForce ); } );
}
// validate a cluster option in a SphinxQL statement
static std::optional <CSphString> CheckClusterOption ( const SmallStringHash_T<SqlInsert_t *> & hValues, const char * szName )
{
SqlInsert_t ** ppVal = hValues ( szName );
if (!ppVal)
return "";
if (( *ppVal )->m_sVal.IsEmpty())
{
TlsMsg::Err ( "'%s' should have a string value", szName );
return {};
}
return ( *ppVal )->m_sVal;
}
// check whether a cluster with the given name already exists
static bool CheckClusterExists ( const CSphString & sCluster ) EXCLUDES ( g_tClustersLock )
{
Threads::SccRL_t rLock ( g_tClustersLock );
return g_hClusters.Exists ( sCluster );
}
// validate cluster SphinxQL statement
enum class MAKE_E : bool { CREATE, JOIN };
static std::optional<ClusterDesc_t> ClusterDescFromSphinxqlStatement ( const CSphString & sCluster, const StrVec_t & dNames, const CSphVector<SqlInsert_t> & dValues, MAKE_E eJoin ) EXCLUDES ( g_tClustersLock )
{
std::optional<ClusterDesc_t> tDesc;
if ( !ReplicationEnabled() )
{
TlsMsg::Err ( "data_dir option is missing or no replication provider configured" );
return tDesc;
}
if ( CheckClusterExists ( sCluster ) )
{
TlsMsg::Err ( "cluster '%s' already exists", sCluster.cstr() );
return tDesc;
}
SmallStringHash_T<SqlInsert_t*> hValues;
assert ( dNames.GetLength() == dValues.GetLength() );
ARRAY_FOREACH ( i, dNames )
hValues.Add ( &dValues[i], dNames[i] );
// optional items
auto tNodes = CheckClusterOption ( hValues, "at_node" );
if ( !tNodes )
return tDesc;
auto dClusterNodes = ParseNodesFromString ( tNodes.value() );
if ( dClusterNodes.IsEmpty() )
{
tNodes = CheckClusterOption ( hValues, "nodes" );
if ( !tNodes )
return tDesc;
dClusterNodes = ParseNodesFromString ( tNodes.value() );
}
if ( eJoin==MAKE_E::JOIN && dClusterNodes.IsEmpty() )
{
TlsMsg::Err ( "cannot join without either nodes list or AT node" );
return tDesc;
}
auto tPath = CheckClusterOption ( hValues, "path" );
if ( !tPath )
return tDesc;
auto tOptions = CheckClusterOption ( hValues, "options" );
if ( !tOptions )
return tDesc;
// check cluster path is unique
if ( !ClusterCheckPath ( tPath.value(), sCluster.cstr(), true ) )
return tDesc;
// all is ok, create cluster desc
tDesc.emplace();
tDesc->m_sName = sCluster;
tDesc->m_sPath = tPath.value();
tDesc->m_dClusterNodes = dClusterNodes;
tDesc->m_tOptions.Parse ( tOptions.value() );
return tDesc;
}
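// For reference, the statements that feed this parser look like the following
// (cluster name and address are illustrative, not taken from this file):
//   CREATE CLUSTER posts
//   JOIN CLUSTER posts AT '10.12.1.35:9312'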
/////////////////////////////////////////////////////////////////////////////
// cluster joins existing nodes
/////////////////////////////////////////////////////////////////////////////
bool ClusterJoin ( const CSphString & sCluster, const StrVec_t & dNames, const CSphVector<SqlInsert_t> & dValues, bool bUpdateNodes ) EXCLUDES ( g_tClustersLock )
{
Threads::ScopedCoroMutex_t tClusterLock { g_tClusterOpsLock };
TlsMsg::ResetErr();
auto tDesc = ClusterDescFromSphinxqlStatement ( sCluster, dNames, dValues, MAKE_E::JOIN );
if ( !tDesc )
return false;
sphLogDebugRpl ( "joining cluster '%s', nodes: %s", sCluster.cstr(), StrVec2Str ( tDesc->m_dClusterNodes ).cstr() );
// need to clean up Galera system files left from previous cluster
CleanClusterFiles ( GetDatadirPath ( tDesc->m_sPath ) );
if ( !CheckRemotesVersions ( tDesc.value() ) )
return false;
ReplicationClusterRefPtr_c pCluster { MakeCluster ( tDesc.value(), BOOTSTRAP_E::NO ) };
if ( !pCluster )
return false;
pCluster->m_bUserRequest = true;
if ( !pCluster->Connect ( BOOTSTRAP_E::NO ) )
return false;
if ( !AddAndStartCluster ( pCluster ) )
return false;
pCluster->m_bUserRequest = false;
bool bOk = IsSyncedOrDonor ( pCluster->WaitReady() );
if ( bOk && bUpdateNodes )
bOk &= DoClusterAlterUpdate ( sCluster, "nodes", NODES_E::BOTH );
if ( bOk )
return true;
if ( sphInterrupted() )
return TlsMsg::Err ( "%s", "daemon shutdown" );
{
ScopedMutex_t tLock ( pCluster->m_tErrorLock );
TlsMsg::Err ( pCluster->m_sError.cstr() );
}
sphWarning ( "'%s' cluster after join error: %s, nodes '%s'", sCluster.cstr(), TlsMsg::szError(), StrVec2Str ( pCluster->m_dClusterNodes ).cstr() );
// need to wait for the recv thread to complete in case of an error after the worker started
pCluster->m_bWorkerActive.Wait ( [] ( bool bWorking ) { return !bWorking; } );
Threads::SccWL_t wLock ( g_tClustersLock );
sphLogDebugRpl ( "deleting cluster %s", sCluster.cstr() );
g_hClusters.Delete ( sCluster );
return false;
}
/////////////////////////////////////////////////////////////////////////////
// cluster create makes this node the master
/////////////////////////////////////////////////////////////////////////////
bool ClusterCreate ( const CSphString & sCluster, const StrVec_t & dNames, const CSphVector<SqlInsert_t> & dValues ) EXCLUDES ( g_tClustersLock )
{
Threads::ScopedCoroMutex_t tClusterLock { g_tClusterOpsLock };
TlsMsg::ResetErr();
if ( !g_bReplicationStarted )
return TlsMsg::Err ( "can not create cluster '%s': %s", sCluster.cstr(), g_sReplicationStartError.cstr() );
auto tDesc = ClusterDescFromSphinxqlStatement ( sCluster, dNames, dValues, MAKE_E::CREATE );
if ( !tDesc )
return TlsMsg::Err ( "failed to create desc: %s", TlsMsg::szError() );
// need to clean up Galera system files left from previous cluster
CleanClusterFiles ( GetDatadirPath ( tDesc->m_sPath ) );
sphLogDebugRpl ( "creating cluster '%s', nodes: %s", sCluster.cstr(), StrVec2Str ( dNames ).cstr() );
ReplicationClusterRefPtr_c pCluster { MakeCluster ( tDesc.value(), BOOTSTRAP_E::YES ) };
if ( !pCluster )
return TlsMsg::Err ( "failed to make cluster: %s", TlsMsg::szError() );
if ( !pCluster->Connect ( BOOTSTRAP_E::YES ) )
return TlsMsg::Err ( "Failed to connect %s", TlsMsg::szError() );
if ( !AddAndStartCluster ( pCluster ) )
return TlsMsg::Err ( "Failed to start and add %s", TlsMsg::szError() );
auto eState = pCluster->WaitReady();
TLS_MSG_STRING ( sError );
return SaveConfigInt ( sError ) && ( IsSyncedOrDonor ( eState ) || TlsMsg::Err ( "Wrong state %s", szNodeState( eState ) ) );
}
// utility function to filter nodes list provided at string by specific protocol
StrVec_t ReplicationCluster_t::FilterViewNodesByProto ( Proto_e eProto, bool bResolve ) const
{
Threads::SccRL_t tNodesRLock ( m_tViewNodesLock );
return FilterNodesByProto ( m_dViewNodes, eProto, bResolve );
}
StrVec_t ReplicationCluster_t::GetViewNodes() const
{
Threads::SccRL_t tNodesRLock ( m_tViewNodesLock );
return m_dViewNodes;
}
void ReplicationCluster_t::SetViewNodes ( StrVec_t&& dNodes )
{
Threads::SccWL_t tNodesWLock ( m_tViewNodesLock );
m_dViewNodes = std::move ( dNodes );
}
StrVec_t ReplicationCluster_t::GetIndexes() const noexcept
{
StrVec_t dIndexes;
WithRlockedAllIndexes ( [&dIndexes] ( const auto & hIndexes, auto & hIndexesLoaded )
{
for ( const auto & tIndex : hIndexes )
dIndexes.Add ( tIndex.first );
for ( const auto & tIndex : hIndexesLoaded )
{
assert ( !hIndexes[tIndex.first] );
dIndexes.Add ( tIndex.first );
}
});
return dIndexes;
}
void ReportClusterError ( const CSphString & sCluster, const CSphString & sError, const char * szClient, E_CLUSTER eCmd )
{
if ( sError.IsEmpty() )
return;
sphWarning ( "'%s' cluster [%s], cmd: %s(%d), error: %s", sCluster.cstr(), szClient, szClusterCmd ( eCmd ), (int)eCmd, sError.cstr() );
auto pCluster = ClusterByName ( sCluster, nullptr );
if ( !pCluster )
return;
ScopedMutex_t tErrorLock ( pCluster->m_tErrorLock );
if ( pCluster->m_sError.GetLength()>1024 ) // collect up to 1024 chars
pCluster->m_sError.Rewind();
pCluster->m_sError += sError.cstr();
}
// command at remote node for CLUSTER_DELETE to delete cluster
bool ClusterDelete ( const CSphString & sCluster ) EXCLUDES ( g_tClustersLock )
{
// erase cluster from all hashes
ReplicationClusterRefPtr_c pCluster;
{
Threads::SccWL_t tLock ( g_tClustersLock );
if ( !g_hClusters.Exists ( sCluster ) )
return TlsMsg::Err ( "unknown cluster '%s'", sCluster.cstr() );
pCluster = g_hClusters[sCluster];
g_hClusters.Delete ( sCluster );
}
sphLogDebugRpl ( "remote delete cluster %s", sCluster.cstr() );
// remove cluster from the cache without deleting the cluster object itself
pCluster->DisconnectAndDeleteProvider();
pCluster->WithRlockedIndexes ( [] ( const auto& hIndexes ) { for_each ( hIndexes, [] ( const auto& tIndex ) { AssignClusterToIndex ( tIndex.first, "" ); } ); } );
return true;
}
/////////////////////////////////////////////////////////////////////////////
// cluster deletes
/////////////////////////////////////////////////////////////////////////////
// cluster delete at every node, then at itself
bool GloballyDeleteCluster ( const CSphString & sCluster, CSphString & sError ) EXCLUDES (g_tClustersLock)
{
TlsMsg::Err();
if ( !g_bReplicationStarted )
{
sError.SetSprintf ( "can not delete cluster '%s': %s", sCluster.cstr(), g_sReplicationStartError.cstr() );
return false;
}
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster )
{
TlsMsg::MoveError ( sError );
return false;
}
auto dNodes = pCluster->FilterViewNodesByProto();
SendClusterDeleteToNodes ( dNodes, sCluster );
bool bOk = ClusterDelete ( sCluster );
bOk &= SaveConfigInt ( sError );
TlsMsg::MoveError ( sError );
return bOk;
}
bool ClusterGetState ( const CSphString & sCluster, RemoteNodeClusterState_t & tState ) EXCLUDES ( g_tClustersLock )
{
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster )
return false;
tState.m_eState = pCluster->GetState();
tState.m_sNode = pCluster->GetNodeName();
return true;
}
static bool HasNotReadyNodes ( ReplicationClusterRefPtr_c pCluster )
{
ClusterState_e eState = pCluster->GetState();
if ( eState==ClusterState_e::DONOR || eState==ClusterState_e::JOINING )
return true;
const auto dStates = GetStatesFromRemotes ( *pCluster );
return dStates.any_of ( []( auto & tState ) { return ( tState.m_eState==ClusterState_e::DONOR || tState.m_eState==ClusterState_e::JOINING ); });
}
// cluster ALTER statement that removes an index from the cluster but keeps it at the daemon
static bool ClusterAlterDrop ( const CSphString & sCluster, const VecTraits_T<CSphString> & dIndexes )
{
RtAccum_t tAcc;
tAcc.AddCommand ( ReplCmd_e::CLUSTER_ALTER_DROP, StrVec2Str ( dIndexes, "," ), sCluster );
return HandleCmdReplicate ( tAcc );
}
// FIXME!!! refactor to use same code as SendClusterIndexes
static bool SendIndex ( const CSphString & sIndex, ReplicationClusterRefPtr_c pCluster )
{
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
bool bMutable = ServedDesc_t::IsMutable ( pServed );
bool bDist = ( !bMutable && g_pDistIndexes->Contains ( sIndex ) );
if ( !bMutable && !bDist )
return TlsMsg::Err ( "unknown or wrong table '%s'", sIndex.cstr() );
int iAttempt = 0;
// should wait a bit longer during join
// FIXME!!! fetch progress delta time and continue to wait while there is a progress
const int iRetryCount = ReplicationRetryCount() * 2;
const int iRetryDelay = ReplicationRetryDelay() * 2;
int64_t tmStart = sphMicroTimer();
while ( true )
{
if ( HasNotReadyNodes ( pCluster ) )
{
iAttempt++;
if ( iAttempt>=iRetryCount )
{
int64_t tmEnd = sphMicroTimer();
return TlsMsg::Err ( "alter '%s' has some nodes not ready for %.3f seconds", pCluster->m_sName.cstr(), (float)(tmEnd - tmStart)/1000000.0f );
}
// FIXME!!! send index only into new node in case of next try happens
sphLogDebugRpl ( "alter '%s' has some nodes not ready, will wait for %d seconds before retry %d", pCluster->m_sName.cstr(), iRetryDelay / 1000, iAttempt );
Threads::Coro::SleepMsec ( iRetryDelay );
continue;
}
auto dNodes = pCluster->FilterViewNodesByProto ( Proto_e::SPHINX, false );
sphLogDebugRpl ( "alter '%s' SST index '%s' to nodes %d: '%s'", pCluster->m_sName.cstr(), sIndex.cstr(), dNodes.GetLength(), StrVec2Str ( dNodes ).cstr() );
// it is ok for a just-created cluster (w/o nodes) to add an existing index
if ( !dNodes.IsEmpty() )
{
VecAgentDesc_t dDesc = GetDescAPINodes ( dNodes, Resolve_e::SLOW );
if ( TlsMsg::HasErr() )
return false;
sphLogDebugRpl ( "alter '%s' SST index '%s' to resolved nodes %d", pCluster->m_sName.cstr(), sIndex.cstr(), dDesc.GetLength() );
if ( dDesc.GetLength() )
{
bool bReplicated = ( bMutable ? ReplicateIndexToNodes ( pCluster->m_sName, sIndex, dDesc, pServed ) : ReplicateDistIndexToNodes ( pCluster->m_sName, sIndex, dDesc ) );
if ( !bReplicated )
{
if ( TlsMsg::HasErr() )
sphWarning ( "%s", TlsMsg::szError() );
return false;
}
}
}
// nodes list might change during alter at the other node
auto dNewNodes = pCluster->FilterViewNodesByProto ( Proto_e::SPHINX, false );
// passed fine: no join happened during the alter and the cluster membership stayed the same
if ( !HasNotReadyNodes ( pCluster ) && IsSameVector ( dNodes, dNewNodes ) )
break;
sphLogDebugRpl ( "nodes not ready during alter '%s', wait for %d sec before retry %d", pCluster->m_sName.cstr(), iRetryDelay / 1000, iAttempt );
Threads::Coro::SleepMsec ( iRetryDelay );
// no need to increase attempt count here as it will be checked on next try
}
return true;
}
struct LoadedIndexesClusterCleanup_t
{
LoadedIndexesClusterCleanup_t ( bool & bAdded, const VecTraits_T<CSphString> & dIndexes, ReplicationClusterRefPtr_c pCluster )
: m_bAdded ( bAdded )
, m_dIndexes ( dIndexes )
, m_pCluster ( pCluster )
{}
~LoadedIndexesClusterCleanup_t()
{
if ( !m_bAdded )
{
m_pCluster->WithWlockedIndexes ( [this] ( auto & hIndexes, auto & hIndexesLoaded )
{
for ( const CSphString & sIndex : m_dIndexes )
hIndexesLoaded.Delete ( sIndex );
});
}
}
bool & m_bAdded;
const VecTraits_T<CSphString> & m_dIndexes;
ReplicationClusterRefPtr_c m_pCluster;
};
// cluster ALTER statement that adds index
static bool ClusterAlterAdd ( const CSphString & sCluster, const VecTraits_T<CSphString> & dIndexes )
EXCLUDES ( g_tClustersLock )
{
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster )
return false;
if ( !pCluster->IsHealthy() )
return false;
for ( const CSphString & sIndex : dIndexes )
{
if ( !SendIndex ( sIndex, pCluster ) )
return false;
}
bool bAdded = false;
pCluster->WithWlockedIndexes ( [&dIndexes] ( auto & hIndexes, auto & hIndexesLoaded )
{
for ( const CSphString & sIndex : dIndexes )
hIndexesLoaded.Add ( sIndex );
});
LoadedIndexesClusterCleanup_t tCleanup ( bAdded, dIndexes, pCluster );
sphLogDebugRpl ( "alter '%s' adding index '%s'", pCluster->m_sName.cstr(), StrVec2Str ( dIndexes, "," ).cstr() );
RtAccum_t tAcc;
ReplicationCommand_t * pAddCmd = tAcc.AddCommand ( ReplCmd_e::CLUSTER_ALTER_ADD, StrVec2Str ( dIndexes, "," ), sCluster );
pAddCmd->m_bCheckIndex = false;
bAdded = HandleCmdReplicate ( tAcc );
sphLogDebugRpl ( "alter '%s' %s index '%s'", pCluster->m_sName.cstr(), ( bAdded ? "added" : "failed to add" ), StrVec2Str ( dIndexes, "," ).cstr() );
return bAdded;
}
static bool ClusterAddCheckDistLocals ( const StrVec_t & dLocals, const CSphString & sCluster, const CSphString & sIndex, CSphString & sError )
{
StringBuilder_c sMissed ( "," );
StringBuilder_c sNonCluster ( "," );
StringBuilder_c sWrongCluster ( "," );
for ( const CSphString & sLocal : dLocals )
{
cServedIndexRefPtr_c pServed = GetServed ( sLocal );
if ( !ServedDesc_t::IsMutable ( pServed ) )
sMissed += sLocal.cstr();
else if ( pServed->m_sCluster.IsEmpty() )
sNonCluster += sLocal.cstr();
else if ( pServed->m_sCluster!=sCluster )
sWrongCluster += sLocal.cstr();
}
if ( sMissed.IsEmpty() && sNonCluster.IsEmpty() && sWrongCluster.IsEmpty() )
return true;
StringBuilder_c sMsg;
sMsg.Appendf ( "can not add distributed table '%s' into cluster '%s';", sIndex.cstr(), sCluster.cstr() );
sMsg.StartBlock ( "; " );
if ( !sMissed.IsEmpty() )
sMsg.Appendf ( "has unknown local tables: %s", sMissed.cstr() );
if ( !sNonCluster.IsEmpty() )
sMsg.Appendf ( "has tables not in the cluster: %s", sNonCluster.cstr() );
if ( !sWrongCluster.IsEmpty() )
sMsg.Appendf ( "has tables in the other cluster: %s", sNonCluster.cstr() );
sMsg.FinishBlock();
sError = sMsg.cstr();
return false;
}
// cluster ALTER statement
bool ClusterAlter ( const CSphString & sCluster, const CSphString & sIndexes, bool bAdd, CSphString & sError )
{
StrVec_t dIndexes = SplitIndexes ( sIndexes );
dIndexes.Uniq();
Threads::ScopedCoroMutex_t tClusterLock { g_tClusterOpsLock };
{
for ( const CSphString & sIndex : dIndexes )
{
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
bool bMutable = ServedDesc_t::IsMutable ( pServed );
cDistributedIndexRefPtr_t pDist ( !bMutable ? GetDistr ( sIndex ) : nullptr );
if ( !bMutable && !pDist )
{
sError.SetSprintf ( "unknown or wrong type of table '%s'", sIndex.cstr() );
return false;
}
const CSphString & sIndexCluster = ( bMutable ? pServed->m_sCluster : pDist->m_sCluster );
if ( bAdd )
{
if ( !sIndexCluster.IsEmpty() )
{
sError.SetSprintf ( "table '%s' is already part of cluster '%s'", sIndex.cstr(), sIndexCluster.cstr() );
return false;
}
// all local indexes should be part of the cluster too
if ( pDist && !ClusterAddCheckDistLocals ( pDist->m_dLocal, sCluster, sIndex, sError ) )
return false;
} else
{
if ( sIndexCluster.IsEmpty() )
{
sError.SetSprintf ( "table '%s' is not in cluster '%s'", sIndex.cstr(), sCluster.cstr() );
return false;
}
}
}
}
if ( !g_bReplicationStarted )
{
sError.SetSprintf ( "can not ALTER cluster '%s': %s", sCluster.cstr(), g_sReplicationStartError.cstr() );
return false;
}
bool bOk = false;
if ( bAdd )
bOk = ClusterAlterAdd ( sCluster, dIndexes );
else
bOk = ClusterAlterDrop ( sCluster, dIndexes );
TlsMsg::MoveError ( sError );
bOk &= SaveConfigInt ( sError );
return bOk;
}
/////////////////////////////////////////////////////////////////////////////
// SST
/////////////////////////////////////////////////////////////////////////////
bool IsSameVector ( StrVec_t & dSrc, StrVec_t & dDst )
{
if ( dSrc.GetLength()!=dDst.GetLength() )
return false;
dSrc.Sort();
dDst.Sort();
ARRAY_FOREACH ( i, dSrc )
{
if ( dSrc[i]!=dDst[i] )
return false;
}
return true;
}
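// A quick sketch of the order-insensitive compare above; note both inputs get
// sorted in place as a side effect:
//   StrVec_t dA { "node2", "node1" }, dB { "node1", "node2" };
//   bool bSame = IsSameVector ( dA, dB ); // true; dA and dB are now both sorted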
bool AddLoadedIndexIntoCluster ( const CSphString & sCluster, const CSphString & sIndex )
{
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster )
return false;
pCluster->WithWlockedIndexes ( [&sIndex] ( auto & hIndexes, auto & hIndexesLoaded ) { hIndexesLoaded.Add ( sIndex ); });
return true;
}
// send local indexes to remote nodes via API
bool SendClusterIndexes ( const ReplicationCluster_t * pCluster, const CSphString & sNode, bool bBypass, const Wsrep::GlobalTid_t & tStateID )
{
auto dDesc = GetDescAPINodes ( ParseNodesFromString ( sNode ), Resolve_e::SLOW );
if ( dDesc.IsEmpty() )
{
if ( TlsMsg::HasErr() )
TlsMsg::Err ( "%s invalid node, error: %s", sNode.cstr(), TlsMsg::MoveToString().cstr() );
else
TlsMsg::Err ( "%s invalid node", sNode.cstr() );
return false;
}
auto dIndexes = pCluster->GetIndexes();
ClusterSyncedRequest_t tSyncedRequest;
tSyncedRequest.m_sCluster = pCluster->m_sName;
tSyncedRequest.m_tGtid = tStateID;
tSyncedRequest.m_dIndexes = dIndexes;
if ( bBypass )
return SendClusterSynced ( dDesc, tSyncedRequest );
bool bSentOk = true;
while ( true )
{
sphLogDebugRpl ( "sending cluster '%s' indexes %d '%s'...'%s'", pCluster->m_sName.cstr(), dIndexes.GetLength(), ( dIndexes.GetLength() ? dIndexes.First().cstr() : "" ), ( dIndexes.GetLength() ? dIndexes.Last().cstr() : "" ) );
for ( const CSphString & sIndex : dIndexes )
{
cServedIndexRefPtr_c pServed = GetServed ( sIndex );
bool bMutable = ServedDesc_t::IsMutable ( pServed );
cDistributedIndexRefPtr_t pDist ( !bMutable ? GetDistr ( sIndex ) : nullptr );
if ( !bMutable && !pDist )
{
bSentOk = false;
sphWarning ( "unknown or wrong table '%s'", sIndex.cstr() );
continue;
}
bool bReplicated = ( bMutable ? ReplicateIndexToNodes ( pCluster->m_sName, sIndex, dDesc, pServed ) : ReplicateDistIndexToNodes ( pCluster->m_sName, sIndex, dDesc ) );
if ( !bReplicated )
{
sphWarning ( "%s", TlsMsg::szError() );
bSentOk = false;
break;
}
if ( TlsMsg::HasErr() )
sphWarning ( "%s", TlsMsg::szError() );
}
// index list could have changed due to alter at the other node
auto dNewIndexes = pCluster->GetIndexes();
if ( dNewIndexes.GetLength() )
sphLogDebugRpl ( "sent cluster '%s' indexes %d '%s'...'%s'", pCluster->m_sName.cstr(), dNewIndexes.GetLength(), dNewIndexes.First().cstr(), dNewIndexes.Last().cstr() );
if ( IsSameVector ( dIndexes, dNewIndexes ) )
break;
sphLogDebugRpl ( "index list changed during donate '%s' to '%s'", pCluster->m_sName.cstr(), sNode.cstr() );
// FIXME!!! send only new indexes but loads all cluster indexes
dIndexes = dNewIndexes;
}
tSyncedRequest.m_dIndexes = dIndexes;
tSyncedRequest.m_bSendFilesSuccess = bSentOk;
tSyncedRequest.m_sMsg = TlsMsg::MoveToString();
bool bSyncOk = SendClusterSynced ( dDesc, tSyncedRequest );
return bSentOk && bSyncOk;
}
// callback at remote node for CLUSTER_SYNCED to pick up received indexes then call Galera sst_received
bool ClusterSynced ( const ClusterSyncedRequest_t & tCmd ) EXCLUDES ( g_tClustersLock )
{
sphLogDebugRpl ( "join sync %s, UID %s, sent %s, tables %d, %s", tCmd.m_sCluster.cstr(), Wsrep::Gtid2Str ( tCmd.m_tGtid ).cstr(), ( tCmd.m_bSendFilesSuccess ? "ok" : "failed" ), tCmd.m_dIndexes.GetLength(), tCmd.m_sMsg.scstr() );
if ( !tCmd.m_bSendFilesSuccess )
{
if ( tCmd.m_sMsg.IsEmpty() )
TlsMsg::Err ( "donor failed to send files" );
else
TlsMsg::Err ( tCmd.m_sMsg );
}
Wsrep::GlobalTid_t tGtid {}; // by default is 'UNDEFINED'
bool bValid = ( tCmd.m_bSendFilesSuccess && tCmd.m_tGtid != tGtid );
if ( bValid )
bValid &= ReplicatedIndexes ( tCmd.m_dIndexes, tCmd.m_sCluster );
auto pCluster = ClusterByName ( tCmd.m_sCluster );
if ( !pCluster )
return false;
tGtid = tCmd.m_tGtid;
int iRes = 0;
if ( !bValid )
{
tGtid.m_iSeqNo = Wsrep::WRONG_SEQNO;
iRes = -ECANCELED;
}
pCluster->m_pProvider->SstReceived ( tGtid, iRes );
return bValid;
}
// validate that a SphinxQL statement can be run for this cluster:index pair
bool ValidateClusterStatement ( const CSphString & sIndexName, const ServedDesc_t & tDesc, const CSphString & sStmtCluster, bool bHTTP )
{
if ( tDesc.m_sCluster==sStmtCluster )
return true;
if ( tDesc.m_sCluster.IsEmpty() )
return TlsMsg::Err ( "table '%s' is not in any cluster, use just '%s'", sIndexName.cstr(), sIndexName.cstr() );
if ( !bHTTP )
return TlsMsg::Err ( "table '%s' is a part of cluster '%s', use '%s:%s'", sIndexName.cstr(), tDesc.m_sCluster.cstr(), tDesc.m_sCluster.cstr(), sIndexName.cstr() );
return TlsMsg::Err( R"(table '%s' is a part of cluster '%s', use "cluster":"%s" and "table":"%s" properties)", sIndexName.cstr(), tDesc.m_sCluster.cstr(), tDesc.m_sCluster.cstr(), sIndexName.cstr() );
}
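// Illustrative statement forms the checks above map to (table 'posts' in cluster
// 'c1' is hypothetical): plain SQL uses the 'c1:posts' form, e.g.
//   INSERT INTO c1:posts (id, title) VALUES (1, 'hello');
// while the HTTP JSON API passes "cluster":"c1" and "table":"posts" as separate properties.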
std::optional<CSphString> IsPartOfCluster ( const ServedDesc_t * pDesc )
{
assert ( pDesc );
if ( !pDesc->m_sCluster.IsEmpty() )
return pDesc->m_sCluster;
return {};
}
// return the set of nodes for the given cluster (both cluster nodes and view nodes)
StrVec_t ClusterGetAllNodes ( const CSphString & sCluster ) EXCLUDES ( g_tClustersLock )
{
TlsMsg::Err();
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster )
return {};
// send back view nodes of cluster - as list of actual nodes
sph::StringSet hNodes;
for_each ( pCluster->GetViewNodes(), [&hNodes] ( const auto& sNode ) { hNodes.Add ( sNode ); } );
for_each ( pCluster->m_dClusterNodes, [&hNodes] ( const auto& sNode ) { hNodes.Add ( sNode ); } );
StrVec_t dNodes;
for_each ( hNodes, [&dNodes] ( const auto& tNode ) { dNodes.Add ( tNode.first ); } );
return dNodes;
// return FilterNodesByProto ( dNodes, Proto_e::REPLICATION );
}
// cluster ALTER statement that updates the nodes option from the view nodes on every node of the cluster
bool ClusterAlterUpdate ( const CSphString & sCluster, const CSphString & sUpdate, CSphString & sError )
{
TlsMsg::ResetErr();
auto bOk = DoClusterAlterUpdate ( sCluster, sUpdate, NODES_E::VIEW );
TlsMsg::MoveError ( sError );
return bOk;
}
bool DoClusterAlterUpdate ( const CSphString & sCluster, const CSphString & sUpdate, NODES_E eNodes ) EXCLUDES ( g_tClustersLock )
{
TlsMsg::ResetErr();
if ( sUpdate!="nodes" )
return TlsMsg::Err ( "unhandled statement, only UPDATE nodes are supported, got '%s'", sUpdate.cstr() );
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster || !pCluster->IsHealthy() )
return false;
// need to update all VIEW nodes - not cluster set nodes
StrVec_t dNodes;
// local nodes update
bool bOk = ClusterUpdateNodes ( sCluster, eNodes, &dNodes );
if ( dNodes.IsEmpty() )
return false;
// remote nodes update after locals updated
if ( !SendClusterUpdateNodes ( sCluster, eNodes, dNodes ) )
{
sphWarning ( "cluster %s nodes update error %s", sCluster.cstr(), TlsMsg::szError() );
TlsMsg::ResetErr();
}
return bOk;
}
// callback at remote node for CLUSTER_UPDATE_NODES to update the cluster's nodes list from the actual nodes list
bool ClusterUpdateNodes ( const CSphString & sCluster, NODES_E eNodes, StrVec_t * pNodes ) EXCLUDES ( g_tClustersLock )
{
auto pCluster = ClusterByName ( sCluster );
if ( !pCluster || !pCluster->IsHealthy() )
{
// node in the joining state should skip the command
if ( pCluster && pCluster->GetState()==ClusterState_e::JOINING )
{
TlsMsg::ResetErr();
return true;
}
return false;
}
auto fnNodesHash = [] ( const StrVec_t & dNodes ) {
uint64_t uRes = SPH_FNV64_SEED;
dNodes.for_each ( [&uRes] ( auto& sNode ) { uRes = sphFNV64cont ( sNode.cstr(), uRes ); } );
return uRes;
};
TlsMsg::ResetErr();
StrVec_t dNodes = pCluster->GetViewNodes();
if ( eNodes==NODES_E::BOTH )
dNodes.Append ( pCluster->m_dClusterNodes );
uint64_t uWasNodes = fnNodesHash ( pCluster->m_dClusterNodes );
pCluster->m_dClusterNodes = FilterNodesByProto ( dNodes, Proto_e::SPHINX );
bool bSaveConf = uWasNodes != fnNodesHash ( pCluster->m_dClusterNodes );
if ( TlsMsg::HasErr() )
return TlsMsg::Err ( "cluster '%s', invalid nodes, error: %s", sCluster.cstr(), TlsMsg::szError() );
if ( pNodes )
*pNodes = pCluster->m_dClusterNodes;
else
bSaveConf = true;
TLS_MSG_STRING ( sError );
auto bOk = true;
if ( bSaveConf )
bOk = SaveConfigInt ( sError );
return bOk;
}
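// The bSaveConf check above detects node-list changes via a chained FNV hash; a
// minimal equivalent sketch (assuming sphFNV64cont folds a NUL-terminated string
// into the running hash):
//   uint64_t uHash = SPH_FNV64_SEED;
//   for ( const CSphString & sNode : dNodes )
//       uHash = sphFNV64cont ( sNode.cstr(), uHash );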
| 76,312 | C++ | .cpp | 1,911 | 37.426478 | 284 | 0.702124 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,904 | exprtraits.cpp | manticoresoftware_manticoresearch/src/exprtraits.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exprtraits.h"
ESphAttr GetIntType ( int64_t iValue )
{
return ( iValue>=(int64_t)INT_MIN && iValue<=(int64_t)INT_MAX ) ? SPH_ATTR_INTEGER : SPH_ATTR_BIGINT;
}
bool IsNumeric ( ESphAttr eType )
{
return eType==SPH_ATTR_INTEGER || eType==SPH_ATTR_BIGINT || eType==SPH_ATTR_FLOAT;
}
ESphAttr WidestType ( ESphAttr a, ESphAttr b )
{
assert ( ( IsNumeric(a) && IsNumeric(b) ) || ( IsNumeric(a) && b==SPH_ATTR_JSON_FIELD ) || ( a==SPH_ATTR_JSON_FIELD && IsNumeric(b) ) );
if ( a==SPH_ATTR_DOUBLE || b==SPH_ATTR_DOUBLE )
return SPH_ATTR_DOUBLE;
if ( a==SPH_ATTR_FLOAT || b==SPH_ATTR_FLOAT )
return SPH_ATTR_FLOAT;
if ( a==SPH_ATTR_BIGINT || b==SPH_ATTR_BIGINT )
return SPH_ATTR_BIGINT;
if ( a==SPH_ATTR_UINT64 || b==SPH_ATTR_UINT64 )
return SPH_ATTR_BIGINT;
if ( a==SPH_ATTR_JSON_FIELD || b==SPH_ATTR_JSON_FIELD )
return SPH_ATTR_BIGINT;
return SPH_ATTR_INTEGER;
}
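// Promotion examples (illustrative, derived from the branches above):
//   WidestType ( SPH_ATTR_INTEGER, SPH_ATTR_FLOAT )==SPH_ATTR_FLOAT
//   WidestType ( SPH_ATTR_INTEGER, SPH_ATTR_BIGINT )==SPH_ATTR_BIGINT
//   WidestType ( SPH_ATTR_INTEGER, SPH_ATTR_JSON_FIELD )==SPH_ATTR_BIGINT // JSON fields widen to BIGINT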
uint64_t sphCalcLocatorHash ( const CSphAttrLocator & tLoc, uint64_t uPrevHash )
{
auto uHash = uPrevHash;
CALC_POD_HASH ( tLoc.m_iBitOffset );
CALC_POD_HASH ( tLoc.m_iBitCount );
CALC_POD_HASH ( tLoc.m_iBlobAttrId );
CALC_POD_HASH ( tLoc.m_iBlobRowOffset );
CALC_POD_HASH ( tLoc.m_nBlobAttrs );
CALC_POD_HASH ( tLoc.m_bDynamic );
return uHash;
}
uint64_t sphCalcExprDepHash ( const char * szTag, ISphExpr * pExpr, const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
uint64_t uHash = sphFNV64 ( szTag, (int) strlen(szTag), uPrevHash );
return sphCalcExprDepHash ( pExpr, tSorterSchema, uHash, bDisable );
}
uint64_t sphCalcExprDepHash ( ISphExpr * pExpr, const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
CSphVector<CSphString> dCols;
pExpr->Command ( SPH_EXPR_GET_DEPENDENT_COLS, &dCols );
uint64_t uHash = uPrevHash;
ARRAY_FOREACH ( i, dCols )
{
const CSphColumnInfo * pCol = tSorterSchema.GetAttr ( dCols[i].cstr() );
assert(pCol);
if ( pCol->m_pExpr )
{
// one more expression
uHash = pCol->m_pExpr->GetHash ( tSorterSchema, uHash, bDisable );
if ( bDisable )
return 0;
}
else
uHash = sphCalcLocatorHash ( pCol->m_tLocator, uHash ); // plain column, add locator to hash
}
return uHash;
}
int GetConstStrOffset ( int64_t iValue )
{
return (int)( iValue>>32 );
}
int GetConstStrLength ( int64_t iValue )
{
return (int)( iValue & 0xffffffffUL );
}
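// Const strings are packed as (offset,length) in one int64: offset in the high 32
// bits, length in the low 32. Round-trip sketch (iOff/iLen are hypothetical):
//   int64_t iPacked = ( int64_t(iOff)<<32 ) | ( iLen & 0xffffffffUL );
//   assert ( GetConstStrOffset ( iPacked )==iOff && GetConstStrLength ( iPacked )==iLen );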
/////////////////////////////////////////////////////////////////////
void ConstList_c::Add ( int64_t iValue )
{
if ( m_eRetType==SPH_ATTR_FLOAT )
m_dFloats.Add ( (float)iValue );
else
{
m_eRetType = WidestType ( m_eRetType, GetIntType(iValue) );
m_dInts.Add(iValue);
}
}
void ConstList_c::Add ( float fValue )
{
if ( m_eRetType!=SPH_ATTR_FLOAT )
{
assert ( m_dFloats.GetLength()==0 );
ARRAY_FOREACH ( i, m_dInts )
m_dFloats.Add ( (float)m_dInts[i] );
m_dInts.Reset ();
m_eRetType = SPH_ATTR_FLOAT;
}
m_dFloats.Add ( fValue );
}
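// The two Add() overloads above promote a mixed list exactly once: the first float
// converts every previously added int to float and switches m_eRetType. Sketch:
//   ConstList_c tList;
//   tList.Add ( int64_t(3) ); // goes to m_dInts, ret type stays integral
//   tList.Add ( 2.5f );       // m_dInts drained into m_dFloats, ret type -> SPH_ATTR_FLOAT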
/////////////////////////////////////////////////////////////////////
Expr_Unary_c::Expr_Unary_c ( const char * szClassName, ISphExpr * pFirst )
: m_pFirst ( pFirst )
, m_szExprName ( szClassName )
{
SafeAddRef ( pFirst );
}
void Expr_Unary_c::FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
if ( m_pFirst )
m_pFirst->FixupLocator ( pOldSchema, pNewSchema );
}
void Expr_Unary_c::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( m_pFirst )
m_pFirst->Command ( eCmd, pArg );
}
uint64_t Expr_Unary_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK(m_szExprName);
CALC_CHILD_HASH(m_pFirst);
return CALC_DEP_HASHES();
}
Expr_Unary_c::Expr_Unary_c ( const Expr_Unary_c & rhs )
: m_pFirst ( SafeClone (rhs.m_pFirst) )
, m_szExprName ( rhs.m_szExprName )
{}
Expr_Binary_c::Expr_Binary_c ( const char * szClassName, ISphExpr * pFirst, ISphExpr * pSecond )
: m_pFirst ( pFirst )
, m_pSecond ( pSecond )
, m_szExprName ( szClassName )
{
SafeAddRef ( pFirst );
SafeAddRef ( pSecond );
}
void Expr_Binary_c::FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
m_pFirst->FixupLocator ( pOldSchema, pNewSchema );
if ( m_pSecond )
m_pSecond->FixupLocator ( pOldSchema, pNewSchema );
}
void Expr_Binary_c::Command ( ESphExprCommand eCmd, void * pArg )
{
m_pFirst->Command ( eCmd, pArg );
if ( m_pSecond )
m_pSecond->Command ( eCmd, pArg );
}
uint64_t Expr_Binary_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK(m_szExprName);
CALC_CHILD_HASH(m_pFirst);
CALC_CHILD_HASH(m_pSecond);
return CALC_DEP_HASHES();
}
Expr_Binary_c::Expr_Binary_c ( const Expr_Binary_c & rhs )
: m_pFirst ( SafeClone (rhs.m_pFirst) )
, m_pSecond ( SafeClone (rhs.m_pSecond) )
, m_szExprName ( rhs.m_szExprName )
{}
| 5,299 | C++ | .cpp | 162 | 30.660494 | 139 | 0.689317 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,905 | debug_cmds.cpp | manticoresoftware_manticoresearch/src/debug_cmds.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinxql_debug.h"
#include "searchdaemon.h"
#include "searchdreplication.h"
#include "searchdtask.h"
#include "digest_sha1.h"
#include "tracer.h"
#include "netfetch.h"
#include "taskmalloctrim.h"
#include "tasksavestate.h"
#include <csignal>
/////////////////////////////////////////////////////////////////////////////
namespace DebugCmd {
enum Traits_e : BYTE
{
NONE = 0,
NEED_VIP = 1,
NO_WIN = 2,
MALLOC_STATS = 4,
MALLOC_TRIM = 8,
};
struct CmdNotice_t
{
BYTE m_uTraits;
const char * m_szExample;
const char * m_szExplanation;
};
//static constexpr std::array<CmdNotice_t, (BYTE)Cmd_e::INVALID_CMD> dCommands {
static constexpr CmdNotice_t dCommands[(BYTE) Cmd_e::INVALID_CMD] {
{ NEED_VIP, "debug shutdown <password>", "emulate TERM signal" },
{ NEED_VIP, "debug crash <password>", "crash daemon (make SIGSEGV action)" },
{ NONE, "debug token <password>", "calculate token for password" },
{ MALLOC_STATS, "debug malloc_stats", "perform 'malloc_stats', result in searchd.log" },
{ MALLOC_TRIM, "debug malloc_trim", "pefrorm 'malloc_trim' call" },
{ NEED_VIP | NO_WIN, "debug procdump", "ask watchdog to dump us" },
{ NEED_VIP | NO_WIN, "debug setgdb on|off", "enable or disable potentially dangerous crash dumping with gdb" },
{ NEED_VIP | NO_WIN, "debug setgdb status", "show current mode of gdb dumping" },
{ NONE, "debug sleep <N>", "sleep for <N> seconds" },
{ NONE, "debug tasks", "display global tasks stat (use select from @@system.tasks instead)" },
{ NONE, "debug sched", "display task manager schedule (use select from @@system.sched instead)" },
{ NONE, "debug merge <TBL> [chunk] <X> [into] [chunk] <Y> [option sync=1,byid=0]",
"For RT table <TBL> merge disk chunk X into disk chunk Y" },
{ NONE, "debug drop [chunk] <X> [from] <TBL> [option sync=1]",
"For RT table <TBL> drop disk chunk X" },
{ NONE, "debug files <TBL> [option format=all|external]",
"list files belonging to <TBL>. 'all' - including external (wordforms, stopwords, etc.)" },
{ NONE, "debug close", "ask server to close connection from it's side" },
{ NONE, "debug compress <TBL> [chunk] <X> [option sync=1]",
"Compress disk chunk X of RT table <TBL> (wipe out deleted documents)" },
{ NONE, "debug dedup <TBL> [chunk] <X>",
"Kill duplicates in disk chunk X of RT table <TBL> (mark duplicates as killed)" },
{ NONE, "debug split <TBL> [chunk] <X> on @<uservar> [option sync=1]",
"Split disk chunk X of RT table <TBL> using set of DocIDs from @uservar" },
{ NO_WIN, "debug wait <cluster> [like 'xx'] [option timeout=3]", "wait <cluster> ready, but no more than 3 secs." },
{ NO_WIN, "debug wait <cluster> status <N> [like 'xx'] [option timeout=13]", "wait <cluster> commit achieve <N>, but no more than 13 secs" },
{ NONE, "debug meta", "Show max_matches/pseudo_shards. Needs set profiling=1" },
{ NONE, "debug trace OFF|'path/to/file' [<N>]", "trace flow to file until N bytes written, or 'trace OFF'" },
{ NONE, "debug curl <URL>", "request given url via libcurl" },
{ NONE, "debug pause <ID> on|off", "switch named breakpoint [dev only]" },
};
}
/////////////////////////////////////////////////////////////////////////////
namespace {
CSphString g_sShutdownToken;
}
void SetShutdownToken ( CSphString sToken ) noexcept
{
g_sShutdownToken = std::move ( sToken );
}
// stuff for command 'debug', isolated
inline static CSphString strSHA1 ( const CSphString& sLine )
{
return CalcSHA1 ( sLine.cstr(), sLine.Length() );
}
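// The shutdown/crash guard below compares strSHA1(<password>) against the
// 'shutdown_token' config value; 'debug token <password>' prints the hash to put
// there. Illustrative session:
//   mysql> debug token <password>;   -- returns the SHA1 hex digest
//   searchd { shutdown_token = <that digest> }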
// defined in searchd.cpp
int GetLogFD ();
void HandleMysqlOptimizeManual ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto sIndex = tCmd.m_sParam;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "MERGE requires an existing RT table" );
return;
}
OptimizeTask_t tTask;
tTask.m_eVerb = OptimizeTask_t::eMerge;
tTask.m_iFrom = (int)tCmd.m_iPar1;
tTask.m_iTo = (int)tCmd.m_iPar2;
tTask.m_bByOrder = !tCmd.bOpt ( "byid", session::GetOptimizeById() );
tTask.m_iCutoff = (int)tCmd.iOpt("cutoff");
RIdx_T<RtIndex_i *> ( pIndex )->StartOptimize ( std::move ( tTask ) );
if ( tCmd.bOpt ( "sync" ) && !PollOptimizeRunning ( sIndex ) )
tOut.Error ( "RT table went away during waiting" );
else
tOut.Ok ();
}
// command 'drop [chunk] X [from] <IDX> [option...]'
void HandleMysqlDropManual ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto sIndex = tCmd.m_sParam;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "DROP requires an existing RT table" );
return;
}
OptimizeTask_t tTask;
tTask.m_eVerb = OptimizeTask_t::eDrop;
tTask.m_iFrom = (int)tCmd.m_iPar1;
tTask.m_bByOrder = !tCmd.bOpt ( "byid", session::GetOptimizeById() );
RIdx_T<RtIndex_i *> ( pIndex )->StartOptimize ( std::move ( tTask ) );
if ( tCmd.bOpt ( "sync" ) && !PollOptimizeRunning ( sIndex ) )
tOut.Error ( "RT table went away during waiting" );
else
tOut.Ok ();
}
void HandleMysqlCompress ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto sIndex = tCmd.m_sParam;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "COMPRESS requires an existing RT table" );
return;
}
OptimizeTask_t tTask;
tTask.m_eVerb = OptimizeTask_t::eCompress;
tTask.m_iFrom = (int) tCmd.m_iPar1;
tTask.m_bByOrder = !tCmd.bOpt ( "byid", session::GetOptimizeById() );
RIdx_T<RtIndex_i *> ( pIndex )->StartOptimize ( std::move ( tTask ) );
if ( tCmd.bOpt ( "sync" ) && !PollOptimizeRunning ( sIndex ) )
tOut.Error ( "RT table went away during waiting" );
else
tOut.Ok ();
}
void HandleMysqlDedup ( RowBuffer_i& tOut, const DebugCmd::DebugCommand_t& tCmd )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
auto sIndex = tCmd.m_sParam;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "DEDUP requires an existing RT table" );
return;
}
OptimizeTask_t tTask;
tTask.m_eVerb = OptimizeTask_t::eDedup;
tTask.m_iFrom = (int)tCmd.m_iPar1;
tTask.m_bByOrder = !tCmd.bOpt ( "byid", session::GetOptimizeById() );
RIdx_T<RtIndex_i*> ( pIndex )->Optimize ( std::move ( tTask ) );
tOut.Ok();
}
// command 'split <IDX> [chunk] N on @uservar [option...]'
// IDX is tCmd.m_sParam
// chunk is tCmd.m_iPar1
// uservar is tCmd.m_sParam2
void HandleMysqlSplit ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd )
{
if ( !sphCheckWeCanModify ( tOut ) )
return;
// check index existence
auto sIndex = tCmd.m_sParam;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsMutable ( pIndex ) )
{
tOut.Error ( "SPLIT requires an existing RT table" );
return;
}
bool bVarFound = false;
IterateUservars ( [&tCmd, &bVarFound] ( const NamedRefVectorPair_t & dVar ) {
if ( dVar.first == tCmd.m_sParam2
// && dVar.second.m_eType==USERVAR_INT_SET_TMP // uncomment this to split only by session (result of delete .. store) variables
)
bVarFound = true;
} );
if ( !bVarFound )
{
tOut.Error ( "SPLIT requires an existing session @uservar" );
return;
}
OptimizeTask_t tTask;
tTask.m_eVerb = OptimizeTask_t::eSplit;
tTask.m_iFrom = (int)tCmd.m_iPar1;
tTask.m_sUvarFilter = tCmd.m_sParam2;
tTask.m_bByOrder = !tCmd.bOpt ( "byid", session::GetOptimizeById() );
RIdx_T<RtIndex_i *> ( pIndex )->StartOptimize ( std::move ( tTask ) );
if ( tCmd.bOpt ( "sync" ) && !PollOptimizeRunning ( sIndex ) )
tOut.Error ( "RT table went away during waiting" );
else
tOut.Ok ();
}
void HandleMysqlDebugMeta ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd, const QueryProfile_c & tProfile )
{
VectorLike tLike ( tCmd.sOpt ( "like" ) );
tLike.MatchTupletf ( "pseudo_shards", "%d", tProfile.m_iPseudoShards );
tLike.MatchTupletf ( "max_matches", "%d", tProfile.m_iMaxMatches );
tOut.DataTable(tLike);
}
void HandleMysqlfiles ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd )
{
auto sIndex = tCmd.m_sParam;
auto pIndex = GetServed ( sIndex );
if ( !ServedDesc_t::IsLocal ( pIndex ) )
{
tOut.Error ( "FILES requires an existing local table" );
return;
}
StrVec_t dFiles;
StrVec_t dExt;
RIdx_c ( pIndex )->GetIndexFiles ( dFiles, dExt );
VectorLike dOut ( 0 );
dOut.SetColNames ( { "file" } );
auto sFormat = tCmd.sOpt ( "format" );
if ( sFormat!="external" )
dFiles.Apply ( [&dOut] ( const CSphString & a ) { dOut.Add ( a ); } );
if ( sFormat=="all" || sFormat=="external" )
{
dExt.Uniq ();
dExt.Apply ( [&dOut] ( const CSphString & a ) { dOut.Add ( a ); } );
}
tOut.DataTable ( dOut );
}
void HandleMysqlclose ( RowBuffer_i & tOut )
{
auto iSocket = session::Info().GetSocket();
if ( iSocket >= 0 )
sphSockClose ( iSocket );
}
void HandleShutdownCrash ( RowBuffer_i & tOut, const CSphString & sPasswd, DebugCmd::Cmd_e eCmd )
{
const char * szCmd = DebugCmd::dCommands[(BYTE) eCmd].m_szExample;
if ( g_sShutdownToken.IsEmpty () )
{
tOut.Error ( "shutdown_token is empty. Provide it in searchd config section." );
return;
}
if ( strSHA1 ( sPasswd )!=g_sShutdownToken )
{
tOut.Error ( "FAIL" );
return;
}
tOut.HeadTuplet ( "command", "result" );
tOut.DataTuplet ( szCmd, "SUCCESS" );
tOut.Eof ();
if ( eCmd==DebugCmd::Cmd_e::SHUTDOWN )
{
sphInterruptNow ();
} else // crash
{
BYTE * pSegv = (BYTE *) ( 0 );
*pSegv = 'a';
}
}
#if !_WIN32
void HandleProcDump ( RowBuffer_i & tOut )
{
auto & iParentPid = getParentPID ();
tOut.HeadTuplet ( "command", "result" );
if ( iParentPid<=0 )
tOut.DataTuplet ( "procdump", "Unavailable (no watchdog)" );
else
{
kill ( iParentPid, SIGUSR1 );
tOut.DataTupletf ( "procdump", "Sent USR1 to wathcdog (%d)", iParentPid );
}
tOut.Eof ();
}
void HandleGdbStatus ( RowBuffer_i & tOut )
{
auto& iParentPid = getParentPID ();
tOut.HeadTuplet ( "command", "result" );
const auto & g_bSafeGDB = getSafeGDB ();
if ( iParentPid>0 )
tOut.DataTupletf ( "setgdb", "Enabled, managed by watchdog (pid=%d)", iParentPid );
else if ( g_bSafeGDB )
tOut.DataTupletf ( "setgdb", "Enabled, managed locally because of jemalloc", iParentPid );
else if ( iParentPid==-1 )
tOut.DataTuplet ( "setgdb", "Enabled locally, MAY HANG!" );
else
tOut.DataTuplet ( "setgdb", "Disabled" );
tOut.Eof ();
}
void HandleSetGdb ( RowBuffer_i & tOut, bool bParam )
{
auto & iParentPid = getParentPID ();
tOut.HeadTuplet ( "command", "result" );
const auto & g_bSafeGDB = getSafeGDB ();
if ( iParentPid>0 )
tOut.DataTupletf ( "setgdb", "Enabled by watchdog (pid=%d)", iParentPid );
else if ( g_bSafeGDB )
tOut.DataTuplet ( "setgdb", "Enabled locally because of jemalloc" );
else if ( bParam )
{
iParentPid = -1;
tOut.DataTuplet ( "setgdb", "Ok, enabled locally, MAY HANG!" );
} else if ( !bParam )
{
iParentPid = 0;
tOut.DataTuplet ( "setgdb", "Ok, disabled" );
}
tOut.Eof ();
}
void HandleWait ( RowBuffer_i& tOutBuf, const DebugCmd::DebugCommand_t& tCmd )
{
auto iTimeoutS = tCmd.iOpt ( "timeout" );
auto sCluster = tCmd.m_sParam;
auto iTime = -sphMicroTimer();
auto sState = WaitClusterReady ( sCluster, iTimeoutS );
iTime += sphMicroTimer();
VectorLike tOut { tCmd.sOpt ( "like" ) };
tOut.SetColName("name");
tOut.MatchTuplet ( "cluster", sCluster.cstr() );
tOut.MatchTuplet ( "state", sState.cstr() );
tOut.MatchTupletf ( "time", "%.2t", iTime );
tOutBuf.DataTable ( tOut );
}
void HandleWaitStatus ( RowBuffer_i& tOutBuf, const DebugCmd::DebugCommand_t& tCmd )
{
auto iTimeoutS = tCmd.iOpt ( "timeout" );
auto sCluster = tCmd.m_sParam;
auto iTxn = (int)tCmd.m_iPar1;
auto iTime = -sphMicroTimer();
auto tAchieved = WaitClusterCommit ( sCluster, iTxn, iTimeoutS );
iTime += sphMicroTimer();
VectorLike tOut { tCmd.sOpt ( "like" ) };
tOut.SetColName ( "name" );
tOut.MatchTuplet ( "cluster", sCluster.cstr() );
tOut.MatchTupletf ( "wanted", "%d", iTxn );
if ( tAchieved.first>=0 )
tOut.MatchTupletf ( "state", "%d", tAchieved.first );
else
tOut.MatchTuplet ( "achieved", tAchieved.second.cstr() );
tOut.MatchTupletf ( "time", "%.2t", iTime );
tOutBuf.DataTable ( tOut );
}
#endif
void HandleTrace ( RowBuffer_i& tOut, const DebugCmd::DebugCommand_t& tCmd )
{
tOut.HeadTuplet ( "command", "result" );
#ifdef PERFETTO
if ( tCmd.m_sParam.IsEmpty() )
{
if ( !tCmd.m_iPar1 )
{
Tracer::Stop();
}
} else
{
Tracer::Start ( tCmd.m_sParam, tCmd.m_iPar1 );
}
tOut.DataTuplet ( "debug trace ...", "SUCCESS" );
#else
tOut.DataTuplet ( "debug trace ...", "FAIL, need to rebuild with Perfetto, look to src/perfetto/README.txt" );
#endif
tOut.Eof();
}
void HandleToken ( RowBuffer_i & tOut, const CSphString & sParam )
{
auto sSha = strSHA1 ( sParam );
tOut.HeadTuplet ( "command", "result" );
tOut.DataTuplet ( "debug token", sSha.cstr () );
tOut.Eof ();
}
void HandleCurl ( RowBuffer_i & tOut, const CSphString & sParam )
{
auto sRes = FetchUrl ( sParam );
tOut.HeadTuplet ( "command", "result" );
tOut.DataTuplet ( "curl", sRes.cstr() );
tOut.Eof();
}
void HandlePause ( RowBuffer_i & tOut, const DebugCmd::DebugCommand_t & tCmd )
{
tOut.HeadTuplet ( "command", "result" );
auto bPause = tCmd.m_iPar1!=0;
PauseAt ( tCmd.m_sParam, bPause );
tOut.DataTuplet ( "debug pause ...", bPause ? "Set" : "Unset" );
tOut.Eof ();
}
#if HAVE_MALLOC_STATS
void HandleMallocStats ( RowBuffer_i & tOut, const CSphString& sParam )
{
tOut.HeadTuplet ( "command", "result" );
// temporarily redirect stderr to the log file while dumping stats
int iOldErr = ::dup ( STDERR_FILENO );
::dup2 ( GetLogFD (), STDERR_FILENO );
sphMallocStats ( sParam.cstr() );
::close ( STDERR_FILENO );
::dup2 ( iOldErr, STDERR_FILENO );
::close ( iOldErr );
tOut.DataTuplet ( "malloc_stats", sphGetLogFile().cstr () );
tOut.Eof ();
}
#endif
#if HAVE_MALLOC_TRIM
void HandleMallocTrim ( RowBuffer_i & tOut )
{
tOut.HeadTuplet ( "command", "result" );
CSphString sResult;
sResult.SetSprintf ( "%d", PerformMallocTrim ( 0 ) );
tOut.DataTuplet ( "malloc_trim", sResult.cstr () );
tOut.Eof ();
}
#endif
void HandleSleep ( RowBuffer_i & tOut, int64_t iParam )
{
int64_t tmStart = sphMicroTimer ();
Threads::Coro::SleepMsec ( Max ( iParam/1000, 1 ) );
int64_t tmDelta = sphMicroTimer ()-tmStart;
tOut.HeadTuplet ( "command", "result" );
CSphString sResult;
sResult.SetSprintf ( "%.3f", (float) tmDelta / 1000000.0f );
tOut.DataTuplet ( "sleep", sResult.cstr () );
tOut.Eof ();
}
void HandleTasks ( RowBuffer_i & tOut )
{
if (!tOut.HeadOfStrings ( { "Name", "MaxRunners", "CurrentRunners", "TotalSpent", "LastFinished", "Executed", "Dropped", "Enqueued" } ))
return;
auto dTasks = TaskManager::GetTaskInfo ();
for ( const auto & dTask : dTasks )
{
tOut.PutString ( dTask.m_sName );
if ( dTask.m_iMaxRunners > 0 )
tOut.PutNumAsString ( dTask.m_iMaxRunners );
else
tOut.PutString ( "unlimited" );
tOut.PutNumAsString ( dTask.m_iCurrentRunners );
tOut.PutTimeAsString ( dTask.m_iTotalSpent );
tOut.PutTimestampAsString ( dTask.m_iLastFinished );
tOut.PutNumAsString ( dTask.m_iTotalRun );
tOut.PutNumAsString ( dTask.m_iTotalDropped );
tOut.PutNumAsString ( dTask.m_iAllRunners );
if ( !tOut.Commit () )
return;
}
tOut.Eof ();
}
void HandleSched ( RowBuffer_i & tOut )
{
if (!tOut.HeadOfStrings ( { "Time rest", "Task" } ))
return;
auto dTasks = sph::GetSchedInfo ();
for ( auto& dTask : dTasks )
{
tOut.PutTimestampAsString ( dTask.m_iTimeoutStamp );
tOut.PutString ( dTask.m_sTask );
if (!tOut.Commit ())
return;
}
tOut.Eof ();
}
void HandleMysqlDebug ( RowBuffer_i &tOut, const DebugCmd::DebugCommand_t* pCommand, const QueryProfile_c & tProfile )
{
using namespace DebugCmd;
bool bVipConn = session::GetVip ();
assert ( pCommand->Valid() );
const auto& tCmd = *pCommand;
if ( bVipConn )
{
switch ( tCmd.m_eCommand )
{
case Cmd_e::SHUTDOWN:
case Cmd_e::CRASH: HandleShutdownCrash ( tOut, tCmd.m_sParam, tCmd.m_eCommand ); return;
#if !_WIN32
case Cmd_e::PROCDUMP: HandleProcDump ( tOut ); return;
case Cmd_e::SETGDB: HandleSetGdb ( tOut, tCmd.m_iPar1!=0 ); return;
case Cmd_e::GDBSTATUS: HandleGdbStatus ( tOut ); return;
#endif
default: break;
}
}
switch ( tCmd.m_eCommand )
{
#if HAVE_MALLOC_STATS
case Cmd_e::MALLOC_STATS: HandleMallocStats ( tOut, tCmd.m_sParam ); return;
#endif
#if HAVE_MALLOC_TRIM
case Cmd_e::MALLOC_TRIM: HandleMallocTrim ( tOut ); return;
#endif
case Cmd_e::TOKEN: HandleToken ( tOut, tCmd.m_sParam ); return;
case Cmd_e::SLEEP: HandleSleep ( tOut, tCmd.m_iPar1 ); return;
case Cmd_e::TASKS: HandleTasks ( tOut ); return;
case Cmd_e::SCHED: HandleSched ( tOut ); return;
case Cmd_e::MERGE: HandleMysqlOptimizeManual ( tOut, tCmd ); return;
case Cmd_e::DROP: HandleMysqlDropManual ( tOut, tCmd ); return;
case Cmd_e::FILES: HandleMysqlfiles ( tOut, tCmd ); return;
case Cmd_e::CLOSE: HandleMysqlclose ( tOut ); return;
case Cmd_e::COMPRESS: HandleMysqlCompress ( tOut, tCmd ); return;
case Cmd_e::DEDUP: HandleMysqlDedup ( tOut, tCmd ); return;
case Cmd_e::SPLIT: HandleMysqlSplit ( tOut, tCmd ); return;
case Cmd_e::META: HandleMysqlDebugMeta ( tOut, tCmd, tProfile ); return;
#if !_WIN32
case Cmd_e::WAIT: HandleWait ( tOut, tCmd ); return;
case Cmd_e::WAIT_STATUS: HandleWaitStatus ( tOut, tCmd ); return;
#endif
case Cmd_e::TRACE: HandleTrace ( tOut, tCmd ); return;
case Cmd_e::CURL: HandleCurl ( tOut, tCmd.m_sParam ); return;
case Cmd_e::PAUSE: HandlePause ( tOut, tCmd ); return;
default: break;
}
// no known command; provide short help.
BYTE uMask = bVipConn ? DebugCmd::NEED_VIP : DebugCmd::NONE;
#if !_WIN32
uMask |= DebugCmd::NO_WIN;
#endif
#if HAVE_MALLOC_STATS
uMask |= DebugCmd::MALLOC_STATS;
#endif
#if HAVE_MALLOC_TRIM
uMask |= DebugCmd::MALLOC_TRIM;
#endif
// display a short help
tOut.HeadTuplet ( "command", "meaning" );
tOut.DataTuplet ( "flush logs", "emulate USR1 signal" );
tOut.DataTuplet ( "reload tables", "emulate HUP signal" );
for ( const auto& dCommand : DebugCmd::dCommands )
if ( ( dCommand.m_uTraits & uMask )==dCommand.m_uTraits )
tOut.DataTuplet ( dCommand.m_szExample, dCommand.m_szExplanation );
tOut.Eof ();
}
| 18,708 | C++ | .cpp | 538 | 32.702602 | 142 | 0.683742 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,906 | netreceive_http.cpp | manticoresoftware_manticoresearch/src/netreceive_http.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "netreceive_http.h"
#include "searchdssl.h"
#include "searchdhttp.h"
#include "tracer.h"
extern int g_iClientTimeoutS; // from searchd.cpp
extern volatile bool g_bMaintenance;
void HttpServe ( std::unique_ptr<AsyncNetBuffer_c> pBuf )
{
auto& tSess = session::Info();
// non-vip connections in maintenance should be already rejected on accept
assert ( !g_bMaintenance || tSess.GetVip() );
bool bINeedSSL = tSess.GetProto ()==Proto_e::HTTPS;
bool bHeNeedSSL = tSess.GetSsl();
bool bICanSSL = bHeNeedSSL ? CheckWeCanUseSSL () : false;
tSess.SetProto ( Proto_e::HTTP );
// he needs, but I can't
if ( bHeNeedSSL && !bICanSSL )
{
if ( bINeedSSL )
LogNetError ( "Client tries to connect with https to secure port, but we can't serve" );
// that will drop the connection (we can't say anything since we can't encrypt our message)
return;
}
// he connects to secured port with plain http
if ( bINeedSSL && !bHeNeedSSL )
{
CSphVector<BYTE> dResult;
sphHttpErrorReply ( dResult, EHTTP_STATUS::_400, "The plain HTTP request was sent to HTTPS port" );
auto & tOut = *(GenericOutputBuffer_c *) pBuf.get();
tOut.SwapData ( dResult );
tOut.Flush (); // no need to check return code since we break anyway
return;
}
// set off query guard
auto & tCrashQuery = GlobalCrashQueryGetRef();
tCrashQuery.m_eType = QUERY_JSON;
// needed to check permission to turn maintenance mode on/off
if ( bHeNeedSSL )
tSess.SetSsl ( MakeSecureLayer ( pBuf ) );
auto& tOut = *(GenericOutputBuffer_c *) pBuf.get();
auto& tIn = *(AsyncNetInputBuffer_c *) pBuf.get();
CSphString sError;
bool bOk = true;
HttpRequestParser_c tParser;
CSphVector<BYTE> dResult;
TRACE_CONN ( "conn", "HttpServe" );
auto HttpReply = [&dResult, &tOut] ( EHTTP_STATUS eCode, Str_t sMsg )
{
if ( IsEmpty ( sMsg ) )
{
HttpBuildReply ( dResult, eCode, sMsg, false );
} else
{
LogNetError ( sMsg.first );
sphHttpErrorReply ( dResult, eCode, sMsg.first );
}
tOut.SwapData ( dResult );
return tOut.Flush();
};
do
{
tIn.DiscardProcessed ( -1 ); // -1 means 'force flush'
tParser.Reinit();
tSess.SetKilled ( false );
// read HTTP header
while ( !tParser.ParseHeader ( tIn.Tail() ) )
{
tIn.PopTail ();
auto iChunk = tIn.ReadAny();
if ( iChunk > 0 )
continue;
if ( !iChunk || tIn.GetError() )
{
sError.SetSprintf ( "failed to receive HTTP request, %s", ( tIn.GetError() ? tIn.GetErrorMessage().cstr() : sphSockError() ) );
HttpReply ( EHTTP_STATUS::_400, FromStr ( sError ) );
}
return;
}
// malformed header
if ( tParser.Error() )
{
if ( tIn.GetError() )
sError.SetSprintf ( "%s, %s", tIn.GetErrorMessage().cstr(), tParser.Error() );
else
sError = tParser.Error();
HttpReply ( EHTTP_STATUS::_400, FromStr ( sError ) );
break;
}
session::Info().SetBuddy ( tParser.IsBuddyQuery() );
// check if we should interrupt because of maxed-out
// but not for buddy queries
if ( IsMaxedOut() )
{
HttpReply ( EHTTP_STATUS::_503, g_sMaxedOutMessage );
gStats().m_iMaxedOut.fetch_add ( 1, std::memory_order_relaxed );
break;
}
// process keep-alive conditions
if ( tParser.KeepAlive() )
{
if ( !tSess.GetPersistent() )
tIn.SetTimeoutUS ( S2US * g_iClientTimeoutS );
tSess.SetPersistent ( true );
} else {
if ( tSess.GetPersistent() )
tIn.SetTimeoutUS ( S2US * g_iReadTimeoutS );
tSess.SetPersistent ( false );
}
// if the first chunk is (most probably) a pure header, we can process special headers here
if ( tParser.Expect100() && !tParser.ParsedBodyLength() )
{
if ( !HttpReply ( EHTTP_STATUS::_100, dEmptyStr ) )
break;
LogReplyStatus100();
}
// tracer.Instant ( [&tIn](StringBuilder_c& sOut) {sOut<< ",\"args\":{\"step\":"<<tIn.HasBytes()<<"}";} );
bOk = tParser.ProcessClientHttp ( tIn, dResult );
tOut.SwapData (dResult);
if ( !tOut.Flush () )
break;
pBuf->SyncErrorState();
if ( tIn.GetError() )
LogNetError ( tIn.GetErrorMessage().cstr() );
pBuf->ResetError();
} while ( tSess.GetPersistent() && bOk );
}
| 4,576 | C++ | .cpp | 137 | 30.408759 | 131 | 0.683912 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,907 | sphinx_alter.cpp | manticoresoftware_manticoresearch/src/sphinx_alter.cpp |
//
// Copyright (c) 2021-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinx_alter.h"
#include "attribute.h"
#include "fileio.h"
#include "columnarmisc.h"
#include "docstore.h"
namespace {
void CreateAttrMap ( CSphVector<int> & dAttrMap, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, int iAttrToRemove )
{
dAttrMap.Resize ( tOldSchema.GetAttrsCount() );
for ( int iAttr = 0; iAttr < tOldSchema.GetAttrsCount(); iAttr++ )
{
const CSphColumnInfo & tOldAttr = tOldSchema.GetAttr(iAttr);
if ( iAttr!=iAttrToRemove && !tOldAttr.IsColumnar() )
dAttrMap[iAttr] = tNewSchema.GetAttrIndex ( tOldAttr.m_sName.cstr() );
else
dAttrMap[iAttr] = -1;
}
}
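// dAttrMap[i] holds the new-schema index of old attribute i, or -1 when that
// attribute is being removed or is columnar (not stored in the row). Illustrative
// mapping for removing 'gid' from { id, gid, title_len }: dAttrMap = { 0, -1, 1 }.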
const CSphRowitem * CopyRow ( const CSphRowitem * pDocinfo, DWORD * pTmpDocinfo, int iOldStride )
{
memcpy ( pTmpDocinfo, pDocinfo, iOldStride*sizeof(DWORD) );
return pDocinfo + iOldStride;
}
const CSphRowitem * CopyRowAttrByAttr ( const CSphRowitem * pDocinfo, DWORD * pTmpDocinfo, const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, const CSphVector<int> & dAttrMap, int iOldStride )
{
for ( int iAttr = 0; iAttr < tOldSchema.GetAttrsCount(); iAttr++ )
{
if ( dAttrMap[iAttr]==-1 )
continue;
const CSphColumnInfo & tOldAttr = tOldSchema.GetAttr(iAttr);
const CSphColumnInfo & tNewAttr = tNewSchema.GetAttr(dAttrMap[iAttr]);
if ( sphIsBlobAttr(tOldAttr) )
continue;
SphAttr_t tValue = sphGetRowAttr ( pDocinfo, tOldAttr.m_tLocator );
sphSetRowAttr ( pTmpDocinfo, tNewAttr.m_tLocator, tValue );
}
return pDocinfo + iOldStride;
}
void AddToSchema ( CSphSchema & tSchema, const AttrAddRemoveCtx_t & tCtx, CSphString & sError )
{
bool bColumnar = !!(tCtx.m_uFlags & CSphColumnInfo::ATTR_COLUMNAR);
const CSphColumnInfo * pBlobLocator = tSchema.GetAttr ( sphGetBlobLocatorName() );
bool bBlob = sphIsBlobAttr ( tCtx.m_eType );
bool bRebuild = false;
if ( bBlob && !bColumnar && !pBlobLocator )
{
bRebuild = true;
CSphColumnInfo tCol ( sphGetBlobLocatorName() );
tCol.m_eAttrType = SPH_ATTR_BIGINT;
// should be right after docid
tSchema.InsertAttr ( 1, tCol, false );
}
CSphColumnInfo tInfo ( tCtx.m_sName.cstr(), tCtx.m_eType );
tInfo.m_uAttrFlags = tCtx.m_uFlags;
tInfo.m_eEngine = tCtx.m_eEngine;
tInfo.m_tLocator.m_iBitCount = tCtx.m_iBits;
tInfo.m_tKNN = tCtx.m_tKNN;
auto iIdxExisting = tSchema.GetAttrIndex ( tCtx.m_sName.cstr() );
if ( iIdxExisting >= 0 )
{
bRebuild = true;
tSchema.RemoveAttr ( tCtx.m_sName.cstr(), false );
tSchema.InsertAttr ( iIdxExisting, tInfo, false );
} else if ( tSchema.GetAttrId_FirstFieldLen()!=-1 )
{
bRebuild = true;
tSchema.InsertAttr ( tSchema.GetAttrId_FirstFieldLen(), tInfo, false );
}
else
tSchema.AddAttr ( tInfo, false );
// rebuild locators in the schema
if ( bRebuild )
{
const char * szTmpColName = "$_tmp";
CSphColumnInfo tTmpCol ( szTmpColName, SPH_ATTR_BIGINT );
tSchema.AddAttr ( tTmpCol, false );
tSchema.RemoveAttr ( szTmpColName, false );
}
}
bool RemoveFromSchema ( CSphSchema & tSchema, const CSphString & sAttrName, ESphAttr eAttrType, CSphString & sError )
{
bool bColumnar = false;
const CSphColumnInfo * pAttrToRemove = tSchema.GetAttr ( sAttrName.cstr() );
if ( pAttrToRemove )
bColumnar = pAttrToRemove->IsColumnar();
bool bBlob = sphIsBlobAttr(eAttrType);
const CSphColumnInfo * pBlobLocator = tSchema.GetAttr ( sphGetBlobLocatorName() );
tSchema.RemoveAttr ( sAttrName.cstr(), false );
if ( bBlob && !bColumnar && pBlobLocator )
{
// remove blob locator if no blobs are left
int nBlobs = 0;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
if ( sphIsBlobAttr ( tSchema.GetAttr(i) ) )
nBlobs++;
if ( !nBlobs )
tSchema.RemoveAttr ( sphGetBlobLocatorName(), false );
}
if ( !tSchema.GetAttrsCount() )
{
sError = "unable to remove the only attribute left";
return false;
}
return true;
}
class AddRemoveCtx_c
{
public:
AddRemoveCtx_c ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, const CSphRowitem * pDocinfo, DWORD uNumRows, const BYTE * pBlobPool, WriteWrapper_c & tSPAWriter, WriteWrapper_c & tSPBWriter,
const CSphString & sAttrName, const IndexAlterHelper_c & tMinMaxer );
bool AddRowwiseAttr();
bool RemoveRowwiseAttr();
private:
const CSphSchema & m_tOldSchema;
const CSphSchema & m_tNewSchema;
const CSphRowitem * m_pDocinfo = nullptr;
DWORD m_uNumRows = 0;
const BYTE * m_pBlobPool = nullptr;
WriteWrapper_c & m_tSPAWriter;
WriteWrapper_c & m_tSPBWriter;
const CSphString & m_sAttrName;
const IndexAlterHelper_c & m_tMinMaxer;
const CSphColumnInfo * m_pOldBlobRowLocator = nullptr;
const CSphColumnInfo * m_pNewBlobRowLocator = nullptr;
int m_iNumOldBlobs = 0;
int m_iNumNewBlobs = 0;
bool m_bHadBlobs = false;
bool m_bHaveBlobs = false;
int m_iOldStride = 0;
int m_iNewStride = 0;
CSphFixedVector<DWORD> m_dAttrRow {0};
CSphTightVector<BYTE> m_dBlobRow;
};
AddRemoveCtx_c::AddRemoveCtx_c ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, const CSphRowitem * pDocinfo, DWORD uNumRows, const BYTE * pBlobPool, WriteWrapper_c & tSPAWriter, WriteWrapper_c & tSPBWriter,
const CSphString & sAttrName, const IndexAlterHelper_c & tMinMaxer )
: m_tOldSchema ( tOldSchema )
, m_tNewSchema ( tNewSchema )
, m_pDocinfo ( pDocinfo )
, m_uNumRows ( uNumRows )
, m_pBlobPool ( pBlobPool )
, m_tSPAWriter ( tSPAWriter )
, m_tSPBWriter ( tSPBWriter )
, m_sAttrName ( sAttrName )
, m_tMinMaxer ( tMinMaxer )
, m_pOldBlobRowLocator ( tOldSchema.GetAttr ( sphGetBlobLocatorName() ) )
, m_pNewBlobRowLocator ( tNewSchema.GetAttr ( sphGetBlobLocatorName() ) )
, m_iOldStride ( tOldSchema.GetRowSize() )
, m_iNewStride ( tNewSchema.GetRowSize() )
{
for ( int i = 0; i<tOldSchema.GetAttrsCount(); i++ )
if ( sphIsBlobAttr ( tOldSchema.GetAttr(i) ) )
m_iNumOldBlobs++;
for ( int i = 0; i<tNewSchema.GetAttrsCount(); i++ )
if ( sphIsBlobAttr ( tNewSchema.GetAttr(i) ) )
m_iNumNewBlobs++;
m_bHadBlobs = m_iNumOldBlobs>0;
m_bHaveBlobs = m_iNumNewBlobs>0;
m_dAttrRow.Reset(m_iNewStride);
}
bool AddRemoveCtx_c::AddRowwiseAttr()
{
bool bKeepVal = m_tOldSchema.GetAttrIndex ( m_sAttrName.cstr() ) >= 0;
bool bNeedRemap = bKeepVal || m_bHadBlobs != m_bHaveBlobs || m_tNewSchema.GetAttrId_FirstFieldLen() != -1;
CSphVector<int> dAttrMap;
if ( bNeedRemap )
CreateAttrMap ( dAttrMap, m_tOldSchema, m_tNewSchema, -1 );
const CSphColumnInfo * pNewAttr = m_tNewSchema.GetAttr ( m_sAttrName.cstr() );
assert ( pNewAttr );
bool bBlob = sphIsBlobAttr ( *pNewAttr );
const CSphRowitem * pNextDocinfo = nullptr;
for ( RowID_t tRowID = 0; tRowID<m_uNumRows; tRowID++ )
{
if ( bNeedRemap )
pNextDocinfo = CopyRowAttrByAttr ( m_pDocinfo, m_dAttrRow.Begin(), m_tOldSchema, m_tNewSchema, dAttrMap, m_iOldStride );
else
pNextDocinfo = CopyRow ( m_pDocinfo, m_dAttrRow.Begin(), m_iOldStride );
if ( !bKeepVal && !pNewAttr->m_tLocator.IsBlobAttr() )
sphSetRowAttr ( m_dAttrRow.Begin(), pNewAttr->m_tLocator, 0 );
if ( bBlob && !m_tMinMaxer.Alter_IsMinMax ( m_pDocinfo, m_iOldStride ) )
{
sphAddAttrToBlobRow ( m_pDocinfo, m_dBlobRow, m_pBlobPool, m_iNumOldBlobs, m_pOldBlobRowLocator ? &m_pOldBlobRowLocator->m_tLocator : nullptr );
SphOffset_t tRowOffset = m_tSPBWriter.GetPos();
m_tSPBWriter.PutBytes ( m_dBlobRow.Begin(), m_dBlobRow.GetLength() );
if ( m_tSPBWriter.IsError() )
return false;
assert(m_pNewBlobRowLocator);
sphSetRowAttr ( m_dAttrRow.Begin(), m_pNewBlobRowLocator->m_tLocator, tRowOffset );
}
m_tSPAWriter.PutBytes ( (const BYTE *)m_dAttrRow.Begin(), m_iNewStride*sizeof(CSphRowitem) );
if ( m_tSPAWriter.IsError() )
return false;
m_pDocinfo = pNextDocinfo;
}
return true;
}
bool AddRemoveCtx_c::RemoveRowwiseAttr()
{
int iAttrToRemove = m_tOldSchema.GetAttrIndex ( m_sAttrName.cstr() );
const CSphColumnInfo & tOldAttr = m_tOldSchema.GetAttr ( iAttrToRemove );
assert ( iAttrToRemove>=0 );
bool bBlob = sphIsBlobAttr ( tOldAttr );
bool bBlobsModified = bBlob && ( m_bHaveBlobs==m_bHadBlobs );
CSphVector<int> dAttrMap;
CreateAttrMap ( dAttrMap, m_tOldSchema, m_tNewSchema, iAttrToRemove );
const CSphRowitem * pNextDocinfo = nullptr;
for ( RowID_t tRowID = 0; tRowID<m_uNumRows; tRowID++ )
{
pNextDocinfo = CopyRowAttrByAttr ( m_pDocinfo, m_dAttrRow.Begin(), m_tOldSchema, m_tNewSchema, dAttrMap, m_iOldStride );
if ( bBlobsModified && !m_tMinMaxer.Alter_IsMinMax ( m_pDocinfo, m_iOldStride ) )
{
assert(m_pOldBlobRowLocator);
sphRemoveAttrFromBlobRow ( m_pDocinfo, m_dBlobRow, m_pBlobPool, m_iNumOldBlobs, tOldAttr.m_tLocator.m_iBlobAttrId, m_pOldBlobRowLocator->m_tLocator );
SphOffset_t tRowOffset = m_tSPBWriter.GetPos();
m_tSPBWriter.PutBytes ( m_dBlobRow.Begin(), m_dBlobRow.GetLength() );
if ( m_tSPBWriter.IsError() )
return false;
assert(m_pNewBlobRowLocator);
sphSetRowAttr ( m_dAttrRow.Begin(), m_pNewBlobRowLocator->m_tLocator, tRowOffset );
}
m_tSPAWriter.PutBytes ( (const BYTE *)m_dAttrRow.Begin(), m_iNewStride*sizeof(CSphRowitem) );
if ( m_tSPAWriter.IsError() )
return false;
m_pDocinfo = pNextDocinfo;
}
return true;
}
} // unnamed namespace
//////////////////////////////////////////////////////////////////////////
class WriteWrapper_Disk_c : public WriteWrapper_c
{
public:
WriteWrapper_Disk_c ( CSphWriter & tWriter )
: m_tWriter ( tWriter )
{}
void PutBytes ( const BYTE * pData, int iSize ) override
{
m_tWriter.PutBytes ( pData, iSize );
}
SphOffset_t GetPos() const override
{
return m_tWriter.GetPos();
}
bool IsError() const override
{
return m_tWriter.IsError();
}
private:
CSphWriter & m_tWriter;
};
//////////////////////////////////////////////////////////////////////////
template<typename T>
class WriteWrapper_Mem_T : public WriteWrapper_c
{
public:
WriteWrapper_Mem_T ( CSphTightVector<T> & tBuffer )
: m_tBuffer ( tBuffer )
{}
void PutBytes ( const BYTE * pData, int iSize ) override
{
assert ( iSize % sizeof(T) == 0 );
T * pNew = m_tBuffer.AddN ( iSize/sizeof(T) );
memcpy ( pNew, pData, iSize );
}
SphOffset_t GetPos() const override
{
return m_tBuffer.GetLength()*sizeof(T);
}
bool IsError() const override
{
return false;
}
protected:
CSphTightVector<T> & m_tBuffer;
};
//////////////////////////////////////////////////////////////////////////
bool IndexAlterHelper_c::Alter_AddRemoveRowwiseAttr ( const CSphSchema & tOldSchema, const CSphSchema & tNewSchema, const CSphRowitem * pDocinfo, DWORD uNumRows, const BYTE * pBlobPool, WriteWrapper_c & tSPAWriter,
WriteWrapper_c & tSPBWriter, bool bAddAttr, const CSphString & sAttrName )
{
if ( !pDocinfo && tOldSchema.GetRowSize() )
return false;
AddRemoveCtx_c tCtx ( tOldSchema, tNewSchema, pDocinfo, uNumRows, pBlobPool, tSPAWriter, tSPBWriter, sAttrName, *this );
if ( bAddAttr )
return tCtx.AddRowwiseAttr();
else
return tCtx.RemoveRowwiseAttr();
}
bool IndexAlterHelper_c::Alter_AddRemoveColumnar ( bool bAdd, const ISphSchema & tOldSchema, const ISphSchema & tNewSchema, columnar::Columnar_i * pColumnar, columnar::Builder_i * pBuilder, DWORD uRows, const CSphString & sIndex, CSphString & sError )
{
std::string sErrorSTL;
CSphVector<std::pair<std::unique_ptr<columnar::Iterator_i>,ESphAttr>> dIterators;
for ( int i = 0; i < tNewSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tNewAttr = tNewSchema.GetAttr(i);
if ( !tNewAttr.IsColumnar() )
continue;
const CSphColumnInfo * pOldAttr = tOldSchema.GetAttr ( tNewAttr.m_sName.cstr() );
if ( !pOldAttr )
{
dIterators.Add ( { nullptr, tNewAttr.m_eAttrType } );
continue;
}
assert(pColumnar);
auto pIterator = CreateColumnarIterator ( pColumnar, tNewAttr.m_sName.cstr(), sErrorSTL );
if ( !pIterator )
{
sError.SetSprintf ( "%s attribute to %s: %s", bAdd ? "adding" : "removing", sIndex.cstr(), sErrorSTL.c_str() );
return false;
}
dIterators.Add ( { std::move (pIterator), pOldAttr->m_eAttrType } );
}
CSphVector<int64_t> dTmp;
for ( RowID_t tRowID = 0; tRowID < uRows; tRowID++ )
{
for ( int iColumnarAttr = 0; iColumnarAttr < dIterators.GetLength(); iColumnarAttr++ )
{
auto & tIterator = dIterators[iColumnarAttr];
if ( tIterator.first )
SetColumnarAttr ( iColumnarAttr, tIterator.second, pBuilder, tIterator.first, tRowID, dTmp );
else
SetDefaultColumnarAttr ( iColumnarAttr, tIterator.second, pBuilder );
}
}
if ( pBuilder && !pBuilder->Done(sErrorSTL) )
{
sError = sErrorSTL.c_str();
return false;
}
return true;
}
bool IndexAlterHelper_c::Alter_AddRemoveFromSchema ( CSphSchema & tSchema, const AttrAddRemoveCtx_t & tCtx, bool bAdd, CSphString & sError ) const
{
if ( bAdd && ( tCtx.m_uFlags & CSphColumnInfo::ATTR_COLUMNAR ) )
{
if ( !IsColumnarLibLoaded() )
{
sError.SetSprintf ( "Unable to add columnar attribute '%s': columnar library not loaded", tCtx.m_sName.cstr() );
return false;
}
if ( tCtx.m_eType==SPH_ATTR_JSON )
{
sError.SetSprintf ( "Unable to add columnar attribute '%s': JSON attribute type is not supported in columnar storage", tCtx.m_sName.cstr() );
return false;
}
}
if ( bAdd )
{
AddToSchema ( tSchema, tCtx, sError );
return true;
}
return RemoveFromSchema ( tSchema, tCtx.m_sName, tCtx.m_eType, sError );
}
bool IndexAlterHelper_c::Alter_AddRemoveFieldFromSchema ( bool bAdd, CSphSchema & tSchema, const CSphString & sFieldName, DWORD uFieldFlags, CSphString & sError )
{
if ( bAdd )
{
if ( tSchema.GetFieldsCount() >= SPH_MAX_FIELDS )
{
sError.SetSprintf ( "Can not alter: table can't have more than %d full-text fields.", SPH_MAX_FIELDS );
return false;
}
CSphColumnInfo tField;
tField.m_sName = sFieldName;
tField.m_uFieldFlags = uFieldFlags;
//tField.m_bPayload = false; // fixme? support it or not?
tSchema.AddField(tField);
return true;
}
else
{
auto iIdx = tSchema.GetFieldIndex ( sFieldName.cstr () );
tSchema.RemoveField ( iIdx );
return true;
}
}
void IndexAlterHelper_c::Alter_AddRemoveFromDocstore ( DocstoreBuilder_i & tBuilder, const Docstore_i * pDocstore, DWORD uNumDocs, const CSphSchema & tNewSchema )
{
struct Field_t
{
CSphString m_sName;
bool m_bField = true;
int m_iOldId = -1;
int m_iRsetId = -1;
};
CSphVector<Field_t> dStoredFields;
for ( int i = 0; i < tNewSchema.GetFieldsCount(); i++ )
if ( tNewSchema.IsFieldStored(i) )
{
const CSphString & sName = tNewSchema.GetFieldName(i);
int iFieldId = pDocstore ? pDocstore->GetFieldId ( sName, DOCSTORE_TEXT ) : -1;
dStoredFields.Add ( { sName, true, iFieldId, -1 } );
}
for ( int i = 0; i < tNewSchema.GetAttrsCount(); i++ )
if ( tNewSchema.IsAttrStored(i) )
{
const CSphString & sName = tNewSchema.GetAttr(i).m_sName;
int iFieldId = pDocstore ? pDocstore->GetFieldId ( sName, DOCSTORE_ATTR ) : -1;
dStoredFields.Add ( { sName, false, iFieldId, -1 } );
}
IntVec_t dStoredFieldIds;
for ( auto & i : dStoredFields )
{
if ( i.m_iOldId>=0 )
{
i.m_iRsetId = dStoredFieldIds.GetLength();
dStoredFieldIds.Add ( i.m_iOldId );
}
tBuilder.AddField ( i.m_sName, i.m_bField ? DOCSTORE_TEXT : DOCSTORE_ATTR );
}
DocstoreDoc_t tOldDoc;
DocstoreBuilder_i::Doc_t tNewDoc;
tNewDoc.m_dFields.Resize ( dStoredFields.GetLength() );
for ( RowID_t tRowID = 0; tRowID < uNumDocs; tRowID++ )
{
if ( pDocstore )
tOldDoc = pDocstore->GetDoc ( tRowID, &dStoredFieldIds, -1, false );
ARRAY_FOREACH ( i, dStoredFields )
{
const Field_t & tField = dStoredFields[i];
if ( tField.m_iRsetId!=-1 )
tNewDoc.m_dFields[i] = tOldDoc.m_dFields[tField.m_iRsetId];
else
tNewDoc.m_dFields[i] = {nullptr,0};
}
tBuilder.AddDoc ( tRowID, tNewDoc );
}
tBuilder.Finalize();
}
//////////////////////////////////////////////////////////////////////////
std::unique_ptr<WriteWrapper_c> CreateWriteWrapperDisk ( CSphWriter & tWriter )
{
return std::make_unique<WriteWrapper_Disk_c>(tWriter);
}
std::unique_ptr<WriteWrapper_c> CreateWriteWrapperMem ( CSphTightVector<CSphRowitem> & dSPA )
{
return std::make_unique<WriteWrapper_Mem_T<CSphRowitem>>(dSPA);
}
std::unique_ptr<WriteWrapper_c> CreateWriteWrapperMem ( CSphTightVector<BYTE> & dSPB )
{
return std::make_unique<WriteWrapper_Mem_T<BYTE>>(dSPB);
}
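// Usage sketch (hypothetical variable names; tHelper stands for some
// IndexAlterHelper_c implementation): rewriting rowwise attributes into memory
// buffers instead of disk files.
//
//	CSphTightVector<CSphRowitem> dSPA;
//	CSphTightVector<BYTE> dSPB;
//	auto pSPAWriter = CreateWriteWrapperMem ( dSPA );
//	auto pSPBWriter = CreateWriteWrapperMem ( dSPB );
//	bool bOk = tHelper.Alter_AddRemoveRowwiseAttr ( tOldSchema, tNewSchema, pDocinfo,
//		uNumRows, pBlobPool, *pSPAWriter, *pSPBWriter, /*bAddAttr=*/true, sAttrName );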
| 16,762 | C++ | .cpp | 451 | 34.558758 | 251 | 0.709877 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,908 | exprremap.cpp | manticoresoftware_manticoresearch/src/exprremap.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "exprremap.h"
#include "sphinx.h"
#include "exprtraits.h"
#include "grouper.h"
#include "sphinxjson.h"
//////////////////////////////////////////////////////////////////////////
// expression that transforms string pool base + offset -> ptr
class ExprSortStringAttrFixup_c : public BlobPool_c, public ISphExpr
{
public:
explicit ExprSortStringAttrFixup_c ( const CSphAttrLocator & tLocator ) : m_tLocator ( tLocator ) {}
float Eval ( const CSphMatch & ) const override { assert ( 0 ); return 0.0f; }
const BYTE * StringEvalPacked ( const CSphMatch & tMatch ) const override;
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override { sphFixupLocator ( m_tLocator, pOldSchema, pNewSchema ); }
void Command ( ESphExprCommand eCmd, void * pArg ) override;
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) override;
ISphExpr * Clone() const final { return new ExprSortStringAttrFixup_c ( *this ); }
public:
CSphAttrLocator m_tLocator; ///< string attribute to fix
private:
ExprSortStringAttrFixup_c ( const ExprSortStringAttrFixup_c & rhs ) : m_tLocator ( rhs.m_tLocator ) {}
};
const BYTE * ExprSortStringAttrFixup_c::StringEvalPacked ( const CSphMatch & tMatch ) const
{
// our blob strings are not null-terminated!
// we can either store nulls in .SPB or add them here
return sphPackPtrAttr ( sphGetBlobAttr ( tMatch, m_tLocator, GetBlobPool() ) );
}
void ExprSortStringAttrFixup_c::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
SetBlobPool( (const BYTE*)pArg);
}
uint64_t ExprSortStringAttrFixup_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK("ExprSortStringAttrFixup_c");
uHash = sphFNV64 ( &m_tLocator, sizeof(m_tLocator), uHash );
return CALC_DEP_HASHES();
}
// expression that transforms string pool base + offset -> ptr
class ExprSortJson2StringPtr_c : public BlobPool_c, public ISphExpr
{
public:
ExprSortJson2StringPtr_c ( const CSphAttrLocator & tLocator, ISphExpr * pExpr );
bool IsDataPtrAttr () const final { return true; }
float Eval ( const CSphMatch & ) const override { assert ( 0 ); return 0.0f; }
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const override;
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override;
void Command ( ESphExprCommand eCmd, void * pArg ) override;
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) override;
ISphExpr * Clone() const final { return new ExprSortJson2StringPtr_c ( *this ); }
private:
CSphAttrLocator m_tJsonCol; ///< JSON attribute to fix
ISphExprRefPtr_c m_pExpr;
private:
ExprSortJson2StringPtr_c ( const ExprSortJson2StringPtr_c & rhs );
};
ExprSortJson2StringPtr_c::ExprSortJson2StringPtr_c ( const CSphAttrLocator & tLocator, ISphExpr * pExpr )
: m_tJsonCol ( tLocator )
, m_pExpr ( pExpr )
{
if ( pExpr ) // adopt the expression
pExpr->AddRef();
}
int ExprSortJson2StringPtr_c::StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const
{
if ( !GetBlobPool() || !m_pExpr )
{
*ppStr = nullptr;
return 0;
}
uint64_t uPacked = m_pExpr->Int64Eval ( tMatch );
CSphString sResult = FormatJsonAsSortStr ( GetBlobPool() + sphJsonUnpackOffset(uPacked), sphJsonUnpackType(uPacked) );
int iStrLen = sResult.Length();
*ppStr = (const BYTE *)sResult.Leak();
return iStrLen;
}
void ExprSortJson2StringPtr_c::FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema )
{
sphFixupLocator ( m_tJsonCol, pOldSchema, pNewSchema );
if ( m_pExpr )
m_pExpr->FixupLocator ( pOldSchema, pNewSchema );
}
void ExprSortJson2StringPtr_c::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( eCmd==SPH_EXPR_SET_BLOB_POOL )
{
SetBlobPool((const BYTE*)pArg);
if ( m_pExpr )
m_pExpr->Command ( eCmd, pArg );
}
}
uint64_t ExprSortJson2StringPtr_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK("ExprSortJson2StringPtr_c");
CALC_CHILD_HASH(m_pExpr);
// uHash = sphFNV64 ( &m_tJsonCol, sizeof ( m_tJsonCol ), uHash ); //< that is wrong! Locator may have padding uninitialized data, valgrind will warn!
uHash = sphCalcLocatorHash ( m_tJsonCol, uHash ); //< that is right, only meaningful fields processed without padding.
return CALC_DEP_HASHES();
}
ExprSortJson2StringPtr_c::ExprSortJson2StringPtr_c ( const ExprSortJson2StringPtr_c & rhs )
: m_tJsonCol ( rhs.m_tJsonCol )
, m_pExpr ( SafeClone (rhs.m_pExpr) )
{}
///////////////////////////////////////////////////////////////////////////////
ISphExpr * CreateExprSortStringFixup ( const CSphAttrLocator & tLoc )
{
return new ExprSortStringAttrFixup_c(tLoc);
}
ISphExpr * CreateExprSortJson2String ( const CSphAttrLocator & tLoc, ISphExpr * pExpr )
{
return new ExprSortJson2StringPtr_c ( tLoc, pExpr );
}
| 5,474 | C++ | .cpp | 124 | 42.233871 | 154 | 0.727033 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,909 | hyperloglog.cpp | manticoresoftware_manticoresearch/src/hyperloglog.cpp |
//
// Copyright (c) 2023-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "hyperloglog.h"
double LinearCounting ( int iV, int iM )
{
return -double(iM)*log ( double(iV)/iM );
}
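// Worked example (illustrative numbers only): with iM = 16384 buckets of which
// iV = 4096 are still zero, LinearCounting returns
// -16384 * ln ( 4096.0/16384 ) = 16384 * ln(4) ≈ 22713 distinct values.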
/////////////////////////////////////////////////////////////////////
RegistersNonPacked_c::RegistersNonPacked_c ( int iBitsPerBucket, int iBuckets )
: m_dValues ( iBuckets )
, m_uMaxValue ( m_dValues.GetLength()-1 )
{
Reset();
}
void RegistersNonPacked_c::Reset()
{
m_dValues.ZeroVec();
m_iUsed = 0;
}
/////////////////////////////////////////////////////////////////////
// See "Appendix to HyperLogLog in Practice: Algorithmic Engineering of a State of the Art Cardinality Estimation Algorithm"
// https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen
const std::vector<std::vector<double>> g_dRawEstimateData =
{
// precision 14
{ 11817.475, 12015.0046, 12215.3792, 12417.7504, 12623.1814, 12830.0086, 13040.0072, 13252.503, 13466.178, 13683.2738, 13902.0344, 14123.9798, 14347.394, 14573.7784, 14802.6894, 15033.6824, 15266.9134, 15502.8624, 15741.4944, 15980.7956, 16223.8916, 16468.6316, 16715.733, 16965.5726, 17217.204, 17470.666, 17727.8516, 17986.7886, 18247.6902, 18510.9632, 18775.304, 19044.7486, 19314.4408, 19587.202, 19862.2576, 20135.924, 20417.0324, 20697.9788, 20979.6112, 21265.0274, 21550.723, 21841.6906, 22132.162, 22428.1406, 22722.127, 23020.5606, 23319.7394, 23620.4014, 23925.2728, 24226.9224, 24535.581, 24845.505, 25155.9618, 25470.3828, 25785.9702, 26103.7764, 26420.4132, 26742.0186, 27062.8852, 27388.415, 27714.6024, 28042.296, 28365.4494, 28701.1526, 29031.8008, 29364.2156, 29704.497, 30037.1458, 30380.111, 30723.8168, 31059.5114, 31404.9498, 31751.6752, 32095.2686, 32444.7792, 32794.767, 33145.204, 33498.4226, 33847.6502, 34209.006, 34560.849, 34919.4838, 35274.9778, 35635.1322, 35996.3266, 36359.1394, 36722.8266, 37082.8516, 37447.7354, 37815.9606, 38191.0692, 38559.4106, 38924.8112, 39294.6726, 39663.973, 40042.261, 40416.2036, 40779.2036, 41161.6436, 41540.9014, 41921.1998, 42294.7698, 42678.5264, 43061.3464, 43432.375, 43818.432, 44198.6598, 44583.0138, 44970.4794, 45353.924, 45729.858, 46118.2224, 46511.5724, 46900.7386, 47280.6964, 47668.1472, 48055.6796, 48446.9436, 48838.7146, 49217.7296, 49613.7796, 50010.7508, 50410.0208, 50793.7886, 51190.2456, 51583.1882, 51971.0796, 52376.5338, 52763.319, 53165.5534, 53556.5594, 53948.2702, 54346.352, 54748.7914, 55138.577, 55543.4824, 55941.1748, 56333.7746, 56745.1552, 57142.7944, 57545.2236, 57935.9956, 58348.5268, 58737.5474, 59158.5962, 59542.6896, 59958.8004, 60349.3788, 60755.0212, 61147.6144, 61548.194, 61946.0696, 62348.6042, 62763.603, 63162.781, 63560.635, 63974.3482, 64366.4908, 64771.5876, 65176.7346, 65597.3916, 65995.915, 66394.0384, 66822.9396, 67203.6336, 67612.2032, 68019.0078, 68420.0388, 68821.22, 69235.8388, 69640.0724, 70055.155, 70466.357, 70863.4266, 71276.2482, 71677.0306, 72080.2006, 72493.0214, 72893.5952, 73314.5856, 73714.9852, 74125.3022, 74521.2122, 74933.6814, 75341.5904, 75743.0244, 76166.0278, 76572.1322, 76973.1028, 77381.6284, 77800.6092, 78189.328, 78607.0962, 79012.2508, 79407.8358, 79825.725, 80238.701, 80646.891, 81035.6436, 81460.0448, 81876.3884, },
// precision 15
{ 23635.0036, 24030.8034, 24431.4744, 24837.1524, 25246.7928, 25661.326, 26081.3532, 26505.2806, 26933.9892, 27367.7098, 27805.318, 28248.799, 28696.4382, 29148.8244, 29605.5138, 30066.8668, 30534.2344, 31006.32, 31480.778, 31962.2418, 32447.3324, 32938.0232, 33432.731, 33930.728, 34433.9896, 34944.1402, 35457.5588, 35974.5958, 36497.3296, 37021.9096, 37554.326, 38088.0826, 38628.8816, 39171.3192, 39723.2326, 40274.5554, 40832.3142, 41390.613, 41959.5908, 42532.5466, 43102.0344, 43683.5072, 44266.694, 44851.2822, 45440.7862, 46038.0586, 46640.3164, 47241.064, 47846.155, 48454.7396, 49076.9168, 49692.542, 50317.4778, 50939.65, 51572.5596, 52210.2906, 52843.7396, 53481.3996, 54127.236, 54770.406, 55422.6598, 56078.7958, 56736.7174, 57397.6784, 58064.5784, 58730.308, 59404.9784, 60077.0864, 60751.9158, 61444.1386, 62115.817, 62808.7742, 63501.4774, 64187.5454, 64883.6622, 65582.7468, 66274.5318, 66976.9276, 67688.7764, 68402.138, 69109.6274, 69822.9706, 70543.6108, 71265.5202, 71983.3848, 72708.4656, 73433.384, 74158.4664, 74896.4868, 75620.9564, 76362.1434, 77098.3204, 77835.7662, 78582.6114, 79323.9902, 80067.8658, 80814.9246, 81567.0136, 82310.8536, 83061.9952, 83821.4096, 84580.8608, 85335.547, 86092.5802, 86851.6506, 87612.311, 88381.2016, 89146.3296, 89907.8974, 90676.846, 91451.4152, 92224.5518, 92995.8686, 93763.5066, 94551.2796, 95315.1944, 96096.1806, 96881.0918, 97665.679, 98442.68, 99229.3002, 100011.0994, 100790.6386, 101580.1564, 102377.7484, 103152.1392, 103944.2712, 104730.216, 105528.6336, 106324.9398, 107117.6706, 107890.3988, 108695.2266, 109485.238, 110294.7876, 111075.0958, 111878.0496, 112695.2864, 113464.5486, 114270.0474, 115068.608, 115884.3626, 116673.2588, 117483.3716, 118275.097, 119085.4092, 119879.2808, 120687.5868, 121499.9944, 122284.916, 123095.9254, 123912.5038, 124709.0454, 125503.7182, 126323.259, 127138.9412, 127943.8294, 128755.646, 129556.5354, 130375.3298, 131161.4734, 131971.1962, 132787.5458, 133588.1056, 134431.351, 135220.2906, 136023.398, 136846.6558, 137667.0004, 138463.663, 139283.7154, 140074.6146, 140901.3072, 141721.8548, 142543.2322, 143356.1096, 144173.7412, 144973.0948, 145794.3162, 146609.5714, 147420.003, 148237.9784, 149050.5696, 149854.761, 150663.1966, 151494.0754, 152313.1416, 153112.6902, 153935.7206, 154746.9262, 155559.547, 156401.9746, 157228.7036, 158008.7254, 158820.75, 159646.9184, 160470.4458, 161279.5348, 162093.3114, 162918.542, 163729.2842, },
// precision 16
{ 47271, 48062.3584, 48862.7074, 49673.152, 50492.8416, 51322.9514, 52161.03, 53009.407, 53867.6348, 54734.206, 55610.5144, 56496.2096, 57390.795, 58297.268, 59210.6448, 60134.665, 61068.0248, 62010.4472, 62962.5204, 63923.5742, 64895.0194, 65876.4182, 66862.6136, 67862.6968, 68868.8908, 69882.8544, 70911.271, 71944.0924, 72990.0326, 74040.692, 75100.6336, 76174.7826, 77252.5998, 78340.2974, 79438.2572, 80545.4976, 81657.2796, 82784.6336, 83915.515, 85059.7362, 86205.9368, 87364.4424, 88530.3358, 89707.3744, 90885.9638, 92080.197, 93275.5738, 94479.391, 95695.918, 96919.2236, 98148.4602, 99382.3474, 100625.6974, 101878.0284, 103141.6278, 104409.4588, 105686.2882, 106967.5402, 108261.6032, 109548.1578, 110852.0728, 112162.231, 113479.0072, 114806.2626, 116137.9072, 117469.5048, 118813.5186, 120165.4876, 121516.2556, 122875.766, 124250.5444, 125621.2222, 127003.2352, 128387.848, 129775.2644, 131181.7776, 132577.3086, 133979.9458, 135394.1132, 136800.9078, 138233.217, 139668.5308, 141085.212, 142535.2122, 143969.0684, 145420.2872, 146878.1542, 148332.7572, 149800.3202, 151269.66, 152743.6104, 154213.0948, 155690.288, 157169.4246, 158672.1756, 160160.059, 161650.6854, 163145.7772, 164645.6726, 166159.1952, 167682.1578, 169177.3328, 170700.0118, 172228.8964, 173732.6664, 175265.5556, 176787.799, 178317.111, 179856.6914, 181400.865, 182943.4612, 184486.742, 186033.4698, 187583.7886, 189148.1868, 190688.4526, 192250.1926, 193810.9042, 195354.2972, 196938.7682, 198493.5898, 200079.2824, 201618.912, 203205.5492, 204765.5798, 206356.1124, 207929.3064, 209498.7196, 211086.229, 212675.1324, 214256.7892, 215826.2392, 217412.8474, 218995.6724, 220618.6038, 222207.1166, 223781.0364, 225387.4332, 227005.7928, 228590.4336, 230217.8738, 231805.1054, 233408.9, 234995.3432, 236601.4956, 238190.7904, 239817.2548, 241411.2832, 243002.4066, 244640.1884, 246255.3128, 247849.3508, 249479.9734, 251106.8822, 252705.027, 254332.9242, 255935.129, 257526.9014, 259154.772, 260777.625, 262390.253, 264004.4906, 265643.59, 267255.4076, 268873.426, 270470.7252, 272106.4804, 273722.4456, 275337.794, 276945.7038, 278592.9154, 280204.3726, 281841.1606, 283489.171, 285130.1716, 286735.3362, 288364.7164, 289961.1814, 291595.5524, 293285.683, 294899.6668, 296499.3434, 298128.0462, 299761.8946, 301394.2424, 302997.6748, 304615.1478, 306269.7724, 307886.114, 309543.1028, 311153.2862, 312782.8546, 314421.2008, 316033.2438, 317692.9636, 319305.2648, 320948.7406, 322566.3364, 324228.4224, 325847.1542, },
// precision 17
{ 94542, 96125.811, 97728.019, 99348.558, 100987.9705, 102646.7565, 104324.5125, 106021.7435, 107736.7865, 109469.272, 111223.9465, 112995.219, 114787.432, 116593.152, 118422.71, 120267.2345, 122134.6765, 124020.937, 125927.2705, 127851.255, 129788.9485, 131751.016, 133726.8225, 135722.592, 137736.789, 139770.568, 141821.518, 143891.343, 145982.1415, 148095.387, 150207.526, 152355.649, 154515.6415, 156696.05, 158887.7575, 161098.159, 163329.852, 165569.053, 167837.4005, 170121.6165, 172420.4595, 174732.6265, 177062.77, 179412.502, 181774.035, 184151.939, 186551.6895, 188965.691, 191402.8095, 193857.949, 196305.0775, 198774.6715, 201271.2585, 203764.78, 206299.3695, 208818.1365, 211373.115, 213946.7465, 216532.076, 219105.541, 221714.5375, 224337.5135, 226977.5125, 229613.0655, 232270.2685, 234952.2065, 237645.3555, 240331.1925, 243034.517, 245756.0725, 248517.6865, 251232.737, 254011.3955, 256785.995, 259556.44, 262368.335, 265156.911, 267965.266, 270785.583, 273616.0495, 276487.4835, 279346.639, 282202.509, 285074.3885, 287942.2855, 290856.018, 293774.0345, 296678.5145, 299603.6355, 302552.6575, 305492.9785, 308466.8605, 311392.581, 314347.538, 317319.4295, 320285.9785, 323301.7325, 326298.3235, 329301.3105, 332301.987, 335309.791, 338370.762, 341382.923, 344431.1265, 347464.1545, 350507.28, 353619.2345, 356631.2005, 359685.203, 362776.7845, 365886.488, 368958.2255, 372060.6825, 375165.4335, 378237.935, 381328.311, 384430.5225, 387576.425, 390683.242, 393839.648, 396977.8425, 400101.9805, 403271.296, 406409.8425, 409529.5485, 412678.7, 415847.423, 419020.8035, 422157.081, 425337.749, 428479.6165, 431700.902, 434893.1915, 438049.582, 441210.5415, 444379.2545, 447577.356, 450741.931, 453959.548, 457137.0935, 460329.846, 463537.4815, 466732.3345, 469960.5615, 473164.681, 476347.6345, 479496.173, 482813.1645, 486025.6995, 489249.4885, 492460.1945, 495675.8805, 498908.0075, 502131.802, 505374.3855, 508550.9915, 511806.7305, 515026.776, 518217.0005, 521523.9855, 524705.9855, 527950.997, 531210.0265, 534472.497, 537750.7315, 540926.922, 544207.094, 547429.4345, 550666.3745, 553975.3475, 557150.7185, 560399.6165, 563662.697, 566916.7395, 570146.1215, 573447.425, 576689.6245, 579874.5745, 583202.337, 586503.0255, 589715.635, 592910.161, 596214.3885, 599488.035, 602740.92, 605983.0685, 609248.67, 612491.3605, 615787.912, 619107.5245, 622307.9555, 625577.333, 628840.4385, 632085.2155, 635317.6135, 638691.7195, 641887.467, 645139.9405, 648441.546, 651666.252, 654941.845, },
// precision 18
{ 189084, 192250.913, 195456.774, 198696.946, 201977.762, 205294.444, 208651.754, 212042.099, 215472.269, 218941.91, 222443.912, 225996.845, 229568.199, 233193.568, 236844.457, 240543.233, 244279.475, 248044.27, 251854.588, 255693.2, 259583.619, 263494.621, 267445.385, 271454.061, 275468.769, 279549.456, 283646.446, 287788.198, 291966.099, 296181.164, 300431.469, 304718.618, 309024.004, 313393.508, 317760.803, 322209.731, 326675.061, 331160.627, 335654.47, 340241.442, 344841.833, 349467.132, 354130.629, 358819.432, 363574.626, 368296.587, 373118.482, 377914.93, 382782.301, 387680.669, 392601.981, 397544.323, 402529.115, 407546.018, 412593.658, 417638.657, 422762.865, 427886.169, 433017.167, 438213.273, 443441.254, 448692.421, 453937.533, 459239.049, 464529.569, 469910.083, 475274.03, 480684.473, 486070.26, 491515.237, 496995.651, 502476.617, 507973.609, 513497.19, 519083.233, 524726.509, 530305.505, 535945.728, 541584.404, 547274.055, 552967.236, 558667.862, 564360.216, 570128.148, 575965.08, 581701.952, 587532.523, 593361.144, 599246.128, 605033.418, 610958.779, 616837.117, 622772.818, 628672.04, 634675.369, 640574.831, 646585.739, 652574.547, 658611.217, 664642.684, 670713.914, 676737.681, 682797.313, 688837.897, 694917.874, 701009.882, 707173.648, 713257.254, 719415.392, 725636.761, 731710.697, 737906.209, 744103.074, 750313.39, 756504.185, 762712.579, 768876.985, 775167.859, 781359, 787615.959, 793863.597, 800245.477, 806464.582, 812785.294, 819005.925, 825403.057, 831676.197, 837936.284, 844266.968, 850642.711, 856959.756, 863322.774, 869699.931, 876102.478, 882355.787, 888694.463, 895159.952, 901536.143, 907872.631, 914293.672, 920615.14, 927130.974, 933409.404, 939922.178, 946331.47, 952745.93, 959209.264, 965590.224, 972077.284, 978501.961, 984953.19, 991413.271, 997817.479, 1004222.658, 1010725.676, 1017177.138, 1023612.529, 1030098.236, 1036493.719, 1043112.207, 1049537.036, 1056008.096, 1062476.184, 1068942.337, 1075524.95, 1081932.864, 1088426.025, 1094776.005, 1101327.448, 1107901.673, 1114423.639, 1120884.602, 1127324.923, 1133794.24, 1140328.886, 1146849.376, 1153346.682, 1159836.502, 1166478.703, 1172953.304, 1179391.502, 1185950.982, 1192544.052, 1198913.41, 1205430.994, 1212015.525, 1218674.042, 1225121.683, 1231551.101, 1238126.379, 1244673.795, 1251260.649, 1257697.86, 1264320.983, 1270736.319, 1277274.694, 1283804.95, 1290211.514, 1296858.568, 1303455.691, }
};
const std::vector<std::vector<double>> g_dBiasData =
{
// precision 14
{ 11816.475, 11605.0046, 11395.3792, 11188.7504, 10984.1814, 10782.0086, 10582.0072, 10384.503, 10189.178, 9996.2738, 9806.0344, 9617.9798, 9431.394, 9248.7784, 9067.6894, 8889.6824, 8712.9134, 8538.8624, 8368.4944, 8197.7956, 8031.8916, 7866.6316, 7703.733, 7544.5726, 7386.204, 7230.666, 7077.8516, 6926.7886, 6778.6902, 6631.9632, 6487.304, 6346.7486, 6206.4408, 6070.202, 5935.2576, 5799.924, 5671.0324, 5541.9788, 5414.6112, 5290.0274, 5166.723, 5047.6906, 4929.162, 4815.1406, 4699.127, 4588.5606, 4477.7394, 4369.4014, 4264.2728, 4155.9224, 4055.581, 3955.505, 3856.9618, 3761.3828, 3666.9702, 3575.7764, 3482.4132, 3395.0186, 3305.8852, 3221.415, 3138.6024, 3056.296, 2970.4494, 2896.1526, 2816.8008, 2740.2156, 2670.497, 2594.1458, 2527.111, 2460.8168, 2387.5114, 2322.9498, 2260.6752, 2194.2686, 2133.7792, 2074.767, 2015.204, 1959.4226, 1898.6502, 1850.006, 1792.849, 1741.4838, 1687.9778, 1638.1322, 1589.3266, 1543.1394, 1496.8266, 1447.8516, 1402.7354, 1361.9606, 1327.0692, 1285.4106, 1241.8112, 1201.6726, 1161.973, 1130.261, 1094.2036, 1048.2036, 1020.6436, 990.901400000002, 961.199800000002, 924.769800000002, 899.526400000002, 872.346400000002, 834.375, 810.432000000001, 780.659800000001, 756.013800000001, 733.479399999997, 707.923999999999, 673.858, 652.222399999999, 636.572399999997, 615.738599999997, 586.696400000001, 564.147199999999, 541.679600000003, 523.943599999999, 505.714599999999, 475.729599999999, 461.779600000002, 449.750800000002, 439.020799999998, 412.7886, 400.245600000002, 383.188199999997, 362.079599999997, 357.533799999997, 334.319000000003, 327.553399999997, 308.559399999998, 291.270199999999, 279.351999999999, 271.791400000002, 252.576999999997, 247.482400000001, 236.174800000001, 218.774599999997, 220.155200000001, 208.794399999999, 201.223599999998, 182.995600000002, 185.5268, 164.547400000003, 176.5962, 150.689599999998, 157.8004, 138.378799999999, 134.021200000003, 117.614399999999, 108.194000000003, 97.0696000000025, 89.6042000000016, 95.6030000000028, 84.7810000000027, 72.635000000002, 77.3482000000004, 59.4907999999996, 55.5875999999989, 50.7346000000034, 61.3916000000027, 50.9149999999936, 39.0384000000049, 58.9395999999979, 29.633600000001, 28.2032000000036, 26.0078000000067, 17.0387999999948, 9.22000000000116, 13.8387999999977, 8.07240000000456, 14.1549999999988, 15.3570000000036, 3.42660000000615, 6.24820000000182, -2.96940000000177, -8.79940000000352, -5.97860000000219, -14.4048000000039, -3.4143999999942, -13.0148000000045, -11.6977999999945, -25.7878000000055, -22.3185999999987, -24.409599999999, -31.9756000000052, -18.9722000000038, -22.8678000000073, -30.8972000000067, -32.3715999999986, -22.3907999999938, -43.6720000000059, -35.9038, -39.7492000000057, -54.1641999999993, -45.2749999999942, -42.2989999999991, -44.1089999999967, -64.3564000000042, -49.9551999999967, -42.6116000000038, },
// precision 15
{ 23634.0036, 23210.8034, 22792.4744, 22379.1524, 21969.7928, 21565.326, 21165.3532, 20770.2806, 20379.9892, 19994.7098, 19613.318, 19236.799, 18865.4382, 18498.8244, 18136.5138, 17778.8668, 17426.2344, 17079.32, 16734.778, 16397.2418, 16063.3324, 15734.0232, 15409.731, 15088.728, 14772.9896, 14464.1402, 14157.5588, 13855.5958, 13559.3296, 13264.9096, 12978.326, 12692.0826, 12413.8816, 12137.3192, 11870.2326, 11602.5554, 11340.3142, 11079.613, 10829.5908, 10583.5466, 10334.0344, 10095.5072, 9859.694, 9625.2822, 9395.7862, 9174.0586, 8957.3164, 8738.064, 8524.155, 8313.7396, 8116.9168, 7913.542, 7718.4778, 7521.65, 7335.5596, 7154.2906, 6968.7396, 6786.3996, 6613.236, 6437.406, 6270.6598, 6107.7958, 5945.7174, 5787.6784, 5635.5784, 5482.308, 5337.9784, 5190.0864, 5045.9158, 4919.1386, 4771.817, 4645.7742, 4518.4774, 4385.5454, 4262.6622, 4142.74679999999, 4015.5318, 3897.9276, 3790.7764, 3685.13800000001, 3573.6274, 3467.9706, 3368.61079999999, 3271.5202, 3170.3848, 3076.4656, 2982.38400000001, 2888.4664, 2806.4868, 2711.9564, 2634.1434, 2551.3204, 2469.7662, 2396.61139999999, 2318.9902, 2243.8658, 2171.9246, 2105.01360000001, 2028.8536, 1960.9952, 1901.4096, 1841.86079999999, 1777.54700000001, 1714.5802, 1654.65059999999, 1596.311, 1546.2016, 1492.3296, 1433.8974, 1383.84600000001, 1339.4152, 1293.5518, 1245.8686, 1193.50659999999, 1162.27959999999, 1107.19439999999, 1069.18060000001, 1035.09179999999, 999.679000000004, 957.679999999993, 925.300199999998, 888.099400000006, 848.638600000006, 818.156400000007, 796.748399999997, 752.139200000005, 725.271200000003, 692.216, 671.633600000001, 647.939799999993, 621.670599999998, 575.398799999995, 561.226599999995, 532.237999999998, 521.787599999996, 483.095799999996, 467.049599999998, 465.286399999997, 415.548599999995, 401.047399999996, 380.607999999993, 377.362599999993, 347.258799999996, 338.371599999999, 310.096999999994, 301.409199999995, 276.280799999993, 265.586800000005, 258.994399999996, 223.915999999997, 215.925399999993, 213.503800000006, 191.045400000003, 166.718200000003, 166.259000000005, 162.941200000001, 148.829400000002, 141.645999999993, 123.535399999993, 122.329800000007, 89.473399999988, 80.1962000000058, 77.5457999999926, 59.1056000000099, 83.3509999999951, 52.2906000000075, 36.3979999999865, 40.6558000000077, 42.0003999999899, 19.6630000000005, 19.7153999999864, -8.38539999999921, -0.692799999989802, 0.854800000000978, 3.23219999999856, -3.89040000000386, -5.25880000001052, -24.9052000000083, -22.6837999999989, -26.4286000000138, -34.997000000003, -37.0216000000073, -43.430400000012, -58.2390000000014, -68.8034000000043, -56.9245999999985, -57.8583999999973, -77.3097999999882, -73.2793999999994, -81.0738000000129, -87.4530000000086, -65.0254000000132, -57.296399999992, -96.2746000000043, -103.25, -96.081600000005, -91.5542000000132, -102.465200000006, -107.688599999994, -101.458000000013, -109.715800000005, },
// precision 16
{ 47270, 46423.3584, 45585.7074, 44757.152, 43938.8416, 43130.9514, 42330.03, 41540.407, 40759.6348, 39988.206, 39226.5144, 38473.2096, 37729.795, 36997.268, 36272.6448, 35558.665, 34853.0248, 34157.4472, 33470.5204, 32793.5742, 32127.0194, 31469.4182, 30817.6136, 30178.6968, 29546.8908, 28922.8544, 28312.271, 27707.0924, 27114.0326, 26526.692, 25948.6336, 25383.7826, 24823.5998, 24272.2974, 23732.2572, 23201.4976, 22674.2796, 22163.6336, 21656.515, 21161.7362, 20669.9368, 20189.4424, 19717.3358, 19256.3744, 18795.9638, 18352.197, 17908.5738, 17474.391, 17052.918, 16637.2236, 16228.4602, 15823.3474, 15428.6974, 15043.0284, 14667.6278, 14297.4588, 13935.2882, 13578.5402, 13234.6032, 12882.1578, 12548.0728, 12219.231, 11898.0072, 11587.2626, 11279.9072, 10973.5048, 10678.5186, 10392.4876, 10105.2556, 9825.766, 9562.5444, 9294.2222, 9038.2352, 8784.848, 8533.2644, 8301.7776, 8058.30859999999, 7822.94579999999, 7599.11319999999, 7366.90779999999, 7161.217, 6957.53080000001, 6736.212, 6548.21220000001, 6343.06839999999, 6156.28719999999, 5975.15419999999, 5791.75719999999, 5621.32019999999, 5451.66, 5287.61040000001, 5118.09479999999, 4957.288, 4798.4246, 4662.17559999999, 4512.05900000001, 4364.68539999999, 4220.77720000001, 4082.67259999999, 3957.19519999999, 3842.15779999999, 3699.3328, 3583.01180000001, 3473.8964, 3338.66639999999, 3233.55559999999, 3117.799, 3008.111, 2909.69140000001, 2814.86499999999, 2719.46119999999, 2624.742, 2532.46979999999, 2444.7886, 2370.1868, 2272.45259999999, 2196.19260000001, 2117.90419999999, 2023.2972, 1969.76819999999, 1885.58979999999, 1833.2824, 1733.91200000001, 1682.54920000001, 1604.57980000001, 1556.11240000001, 1491.3064, 1421.71960000001, 1371.22899999999, 1322.1324, 1264.7892, 1196.23920000001, 1143.8474, 1088.67240000001, 1073.60380000001, 1023.11660000001, 959.036400000012, 927.433199999999, 906.792799999996, 853.433599999989, 841.873800000001, 791.1054, 756.899999999994, 704.343200000003, 672.495599999995, 622.790399999998, 611.254799999995, 567.283200000005, 519.406599999988, 519.188400000014, 495.312800000014, 451.350799999986, 443.973399999988, 431.882199999993, 392.027000000002, 380.924200000009, 345.128999999986, 298.901400000002, 287.771999999997, 272.625, 247.253000000026, 222.490600000019, 223.590000000026, 196.407599999977, 176.425999999978, 134.725199999986, 132.4804, 110.445599999977, 86.7939999999944, 56.7038000000175, 64.915399999998, 38.3726000000024, 37.1606000000029, 46.170999999973, 49.1716000000015, 15.3362000000197, 6.71639999997569, -34.8185999999987, -39.4476000000141, 12.6830000000191, -12.3331999999937, -50.6565999999875, -59.9538000000175, -65.1054000000004, -70.7576000000117, -106.325200000021, -126.852200000023, -110.227599999984, -132.885999999999, -113.897200000007, -142.713800000027, -151.145399999979, -150.799200000009, -177.756200000003, -156.036399999983, -182.735199999996, -177.259399999981, -198.663600000029, -174.577600000019, -193.84580000001, },
// precision 17
{ 94541, 92848.811, 91174.019, 89517.558, 87879.9705, 86262.7565, 84663.5125, 83083.7435, 81521.7865, 79977.272, 78455.9465, 76950.219, 75465.432, 73994.152, 72546.71, 71115.2345, 69705.6765, 68314.937, 66944.2705, 65591.255, 64252.9485, 62938.016, 61636.8225, 60355.592, 59092.789, 57850.568, 56624.518, 55417.343, 54231.1415, 53067.387, 51903.526, 50774.649, 49657.6415, 48561.05, 47475.7575, 46410.159, 45364.852, 44327.053, 43318.4005, 42325.6165, 41348.4595, 40383.6265, 39436.77, 38509.502, 37594.035, 36695.939, 35818.6895, 34955.691, 34115.8095, 33293.949, 32465.0775, 31657.6715, 30877.2585, 30093.78, 29351.3695, 28594.1365, 27872.115, 27168.7465, 26477.076, 25774.541, 25106.5375, 24452.5135, 23815.5125, 23174.0655, 22555.2685, 21960.2065, 21376.3555, 20785.1925, 20211.517, 19657.0725, 19141.6865, 18579.737, 18081.3955, 17578.995, 17073.44, 16608.335, 16119.911, 15651.266, 15194.583, 14749.0495, 14343.4835, 13925.639, 13504.509, 13099.3885, 12691.2855, 12328.018, 11969.0345, 11596.5145, 11245.6355, 10917.6575, 10580.9785, 10277.8605, 9926.58100000001, 9605.538, 9300.42950000003, 8989.97850000003, 8728.73249999998, 8448.3235, 8175.31050000002, 7898.98700000002, 7629.79100000003, 7413.76199999999, 7149.92300000001, 6921.12650000001, 6677.1545, 6443.28000000003, 6278.23450000002, 6014.20049999998, 5791.20299999998, 5605.78450000001, 5438.48800000001, 5234.2255, 5059.6825, 4887.43349999998, 4682.935, 4496.31099999999, 4322.52250000002, 4191.42499999999, 4021.24200000003, 3900.64799999999, 3762.84250000003, 3609.98050000001, 3502.29599999997, 3363.84250000003, 3206.54849999998, 3079.70000000001, 2971.42300000001, 2867.80349999998, 2727.08100000001, 2630.74900000001, 2496.6165, 2440.902, 2356.19150000002, 2235.58199999999, 2120.54149999999, 2012.25449999998, 1933.35600000003, 1820.93099999998, 1761.54800000001, 1663.09350000002, 1578.84600000002, 1509.48149999999, 1427.3345, 1379.56150000001, 1306.68099999998, 1212.63449999999, 1084.17300000001, 1124.16450000001, 1060.69949999999, 1007.48849999998, 941.194499999983, 879.880500000028, 836.007500000007, 782.802000000025, 748.385499999975, 647.991500000004, 626.730500000005, 570.776000000013, 484.000500000024, 513.98550000001, 418.985499999952, 386.996999999974, 370.026500000036, 355.496999999974, 356.731499999994, 255.92200000002, 259.094000000041, 205.434499999974, 165.374500000034, 197.347500000033, 95.718499999959, 67.6165000000037, 54.6970000000438, 31.7395000000251, -15.8784999999916, 8.42500000004657, -26.3754999999655, -118.425500000012, -66.6629999999423, -42.9745000000112, -107.364999999991, -189.839000000036, -162.611499999999, -164.964999999967, -189.079999999958, -223.931499999948, -235.329999999958, -269.639500000048, -249.087999999989, -206.475499999942, -283.04449999996, -290.667000000016, -304.561499999953, -336.784499999951, -380.386500000022, -283.280499999993, -364.533000000054, -389.059499999974, -364.454000000027, -415.748000000021, -417.155000000028, },
// precision 18
{ 189083, 185696.913, 182348.774, 179035.946, 175762.762, 172526.444, 169329.754, 166166.099, 163043.269, 159958.91, 156907.912, 153906.845, 150924.199, 147996.568, 145093.457, 142239.233, 139421.475, 136632.27, 133889.588, 131174.2, 128511.619, 125868.621, 123265.385, 120721.061, 118181.769, 115709.456, 113252.446, 110840.198, 108465.099, 106126.164, 103823.469, 101556.618, 99308.004, 97124.508, 94937.803, 92833.731, 90745.061, 88677.627, 86617.47, 84650.442, 82697.833, 80769.132, 78879.629, 77014.432, 75215.626, 73384.587, 71652.482, 69895.93, 68209.301, 66553.669, 64921.981, 63310.323, 61742.115, 60205.018, 58698.658, 57190.657, 55760.865, 54331.169, 52908.167, 51550.273, 50225.254, 48922.421, 47614.533, 46362.049, 45098.569, 43926.083, 42736.03, 41593.473, 40425.26, 39316.237, 38243.651, 37170.617, 36114.609, 35084.19, 34117.233, 33206.509, 32231.505, 31318.728, 30403.404, 29540.0550000001, 28679.236, 27825.862, 26965.216, 26179.148, 25462.08, 24645.952, 23922.523, 23198.144, 22529.128, 21762.4179999999, 21134.779, 20459.117, 19840.818, 19187.04, 18636.3689999999, 17982.831, 17439.7389999999, 16874.547, 16358.2169999999, 15835.684, 15352.914, 14823.681, 14329.313, 13816.897, 13342.874, 12880.882, 12491.648, 12021.254, 11625.392, 11293.7610000001, 10813.697, 10456.209, 10099.074, 9755.39000000001, 9393.18500000006, 9047.57900000003, 8657.98499999999, 8395.85900000005, 8033, 7736.95900000003, 7430.59699999995, 7258.47699999996, 6924.58200000005, 6691.29399999999, 6357.92500000005, 6202.05700000003, 5921.19700000004, 5628.28399999999, 5404.96799999999, 5226.71100000001, 4990.75600000005, 4799.77399999998, 4622.93099999998, 4472.478, 4171.78700000001, 3957.46299999999, 3868.95200000005, 3691.14300000004, 3474.63100000005, 3341.67200000002, 3109.14000000001, 3071.97400000005, 2796.40399999998, 2756.17799999996, 2611.46999999997, 2471.93000000005, 2382.26399999997, 2209.22400000005, 2142.28399999999, 2013.96100000001, 1911.18999999994, 1818.27099999995, 1668.47900000005, 1519.65800000005, 1469.67599999998, 1367.13800000004, 1248.52899999998, 1181.23600000003, 1022.71900000004, 1088.20700000005, 959.03600000008, 876.095999999903, 791.183999999892, 703.337000000058, 731.949999999953, 586.86400000006, 526.024999999907, 323.004999999888, 320.448000000091, 340.672999999952, 309.638999999966, 216.601999999955, 102.922999999952, 19.2399999999907, -0.114000000059605, -32.6240000000689, -89.3179999999702, -153.497999999905, -64.2970000000205, -143.695999999996, -259.497999999905, -253.017999999924, -213.948000000091, -397.590000000084, -434.006000000052, -403.475000000093, -297.958000000101, -404.317000000039, -528.898999999976, -506.621000000043, -513.205000000075, -479.351000000024, -596.139999999898, -527.016999999993, -664.681000000099, -680.306000000099, -704.050000000047, -850.486000000034, -757.43200000003, -713.308999999892, }
};
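// The tables above follow the HyperLogLog++ bias-correction scheme: for a raw
// estimate at a given precision, the bias is interpolated from the nearest
// raw-estimate anchors. A minimal k-nearest-neighbor sketch, assuming the two
// vectors are parallel, sorted by raw estimate, and that <algorithm> is
// available (illustrative only, not this codebase's actual lookup):
static double ExampleEstimateBias ( double fRawEstimate, const std::vector<double> & dRawAnchors, const std::vector<double> & dBiasAnchors )
{
	const size_t K = 6; // neighbor count used by the HLL++ reference implementation
	size_t iHi = std::lower_bound ( dRawAnchors.begin(), dRawAnchors.end(), fRawEstimate ) - dRawAnchors.begin();
	size_t iLo = iHi;
	double fBiasSum = 0.0;
	size_t iTaken = 0;
	// expand outwards from the insertion point, always taking the closer anchor
	while ( iTaken<K && ( iLo>0 || iHi<dRawAnchors.size() ) )
	{
		bool bTakeLo = iHi>=dRawAnchors.size() || ( iLo>0 && fRawEstimate-dRawAnchors[iLo-1]<=dRawAnchors[iHi]-fRawEstimate );
		fBiasSum += bTakeLo ? dBiasAnchors[--iLo] : dBiasAnchors[iHi++];
		iTaken++;
	}
	return iTaken ? fBiasSum/double(iTaken) : 0.0;
}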
| 28,300 | C++ | .cpp | 55 | 512.909091 | 2,980 | 0.765193 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 16,910 | searchdhttpcompat.cpp | manticoresoftware_manticoresearch/src/searchdhttpcompat.cpp |
//
// Copyright (c) 2017-2020, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "sphinx.h"
#include "sphinxint.h"
#include "sphinxjson.h"
#include "sphinxjsonquery.h"
#include "http/http_parser.h"
#include "searchdaemon.h"
#include "searchdha.h"
#include "searchdsql.h"
#include "searchdhttp.h"
#include "client_session.h"
//#undef snprintf
//#undef strtoull
//#undef strtoll
#include "nlohmann/json.hpp"
#include <iostream>
using nljson = nlohmann::json;
#include "http/log_management.h"
static SmallStringHash_T<CSphString> g_hAlias;
static RwLock_t g_tLockAlias;
static RwLock_t g_tLockKbnTable;
static nljson g_tKbnTable = "{}"_json;
static bool g_bEnabled = true;
static CSphString g_sKbnTableName = ".kibana";
static CSphString g_sKbnTableAlias = ".kibana_1";
static std::vector<CSphString> g_dKbnTablesNames { ".kibana_task_manager", ".apm-agent-configuration", g_sKbnTableName };
static nljson::json_pointer g_tConfigTables ( "/dashboards/tables" );
static nljson::json_pointer g_tMode ( "/dashboards/mode" );
static bool LOG_LEVEL_COMPAT = val_from_env ( "MANTICORE_LOG_ES_COMPAT", false ); // verbose logging of compat events, controlled by this env variable
#define LOG_COMPONENT_COMPATINFO ""
#define COMPATINFO LOGINFO ( COMPAT, COMPATINFO )
#define CompatWarning( ... ) do if ( LOG_LEVEL_COMPAT ) sphWarning_impl (__VA_ARGS__); while(0)
struct CompatInsert_t;
class HttpCompatHandler_c : public HttpCompatBaseHandler_c
{
public:
HttpCompatHandler_c ( Str_t sBody, int iReqType, const SmallStringHash_T<CSphString> & hOpts );
bool Process () final;
static void SetLogFilter ( const CSphString & sVal );
private:
bool ProcessEndpoints();
void ReportMissedIndex ( const CSphString & sIndex );
void ReportIncorrectMethod ( const char * sAllowed );
void ReportMissedScript ( const CSphString & sIndex );
void EmptyReply();
bool ProcessMSearch ();
bool ProcessSearch();
void ProcessEmptyHead();
bool ProcessKbnTableDoc();
void ProcessCat();
void ProcessAliasGet();
void ProcessILM();
void ProcessCCR();
void ProcessKbnTableGet();
void ProcessCount();
bool ProcessInsert();
void ProcessInsertIntoIdx ( const CompatInsert_t & tIns );
void ProcessKbnTableMGet();
void ProcessPutTemplate();
void ProcessIgnored();
bool ProcessCreateTable();
bool ProcessDeleteDoc();
bool ProcessUpdateDoc();
void ProcessDeleteTable();
void ProcessAliasSet();
void ProcessRefresh ( const CSphString * pName );
void ProcessFields();
static CSphMutex m_tReqStatLock;
static SmallStringHash_T<int> m_tReqStat;
static CSphString m_sLogHttpFilter;
};
Str_t FromStd ( const std::string & sVal ) { return { sVal.c_str(), sVal.length() }; }
//////////////////////////////////////////////////////////////////////////
// compatibility mode
static void ReplaceSubstring ( std::string & s, const std::string & f, const std::string & t )
{
assert ( !f.empty() );
for (auto pos = s.find(f); // find first occurrence of f
pos != std::string::npos; // make sure f was found
s.replace(pos, f.size(), t), // replace with t, and
pos = s.find(f, pos + t.size())) // find next occurrence of f
{}
}
static void CreateKbnTable ( const CSphString & sParent, bool bRoot, const nljson & tProps, CreateTableSettings_t & tOpts )
{
if ( tProps.contains( "properties" ) )
{
CreateKbnTable ( sParent, false, tProps["properties"], tOpts );
return;
}
for ( auto & tVal : tProps.items() )
{
if ( !tVal.value().is_object() )
continue;
CSphString sName;
if ( bRoot )
sName = tVal.key().c_str();
else
sName.SetSprintf ( "%s.%s", sParent.cstr(), tVal.key().c_str() );
if ( tVal.value().contains( "properties" ) )
{
// create all fields from child objects
CreateKbnTable ( sName, false, tVal.value()["properties"], tOpts );
// a complex type itself is stored as a plain JSON attribute
if ( bRoot )
{
CreateTableAttr_t & tAttr = tOpts.m_dAttrs.Add();
tAttr.m_tAttr.m_sName = tVal.key().c_str();
tAttr.m_tAttr.m_eAttrType = SPH_ATTR_JSON;
}
continue;
}
if ( !tVal.value().contains( "type" ) )
continue;
std::string sType = tVal.value()["type"].get<std::string>();
// fields at all levels
if ( sType=="text" )
{
CSphColumnInfo & tRaw = tOpts.m_dFields.Add();
tRaw.m_sName = sName;
tRaw.m_uFieldFlags = CSphColumnInfo::FIELD_INDEXED;
}
if ( !bRoot )
continue;
// simple types at top level are attributes
if ( sType=="keyword" || strncmp ( sType.c_str(), "date", sType.size() )==0 )
{
CreateTableAttr_t & tAttr = tOpts.m_dAttrs.Add();
tAttr.m_tAttr.m_sName = sName;
tAttr.m_tAttr.m_eAttrType = SPH_ATTR_STRING;
// keywords are also indexed to allow filtering by column
if ( sType=="keyword" )
{
CSphColumnInfo & tRaw = tOpts.m_dFields.Add();
tRaw.m_sName = sName;
tRaw.m_uFieldFlags = CSphColumnInfo::FIELD_INDEXED;
}
} else if ( sType=="integer" || sType=="long" || sType=="boolean" )
{
CreateTableAttr_t & tAttr = tOpts.m_dAttrs.Add();
tAttr.m_tAttr.m_sName = sName;
tAttr.m_tAttr.m_eAttrType = SPH_ATTR_INTEGER;
} else if ( sType=="object" )
{
CreateTableAttr_t & tAttr = tOpts.m_dAttrs.Add();
tAttr.m_tAttr.m_sName = sName;
tAttr.m_tAttr.m_eAttrType = SPH_ATTR_JSON;
} else
{
std::string sVal = tVal.value().dump();
CompatWarning ( "skipped column '%s' %s", sName.cstr(), sVal.c_str() );
}
}
}
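// Illustration (hypothetical mapping, not from a real Kibana index): given
// mappings.properties = { "status": {"type":"keyword"},
//                         "meta": { "properties": { "note": {"type":"text"},
//                                                   "count": {"type":"integer"} } } }
// this produces a string attribute plus an indexed field "status", an indexed
// field "meta.note", and a JSON attribute "meta"; the nested "meta.count" is
// dropped, since non-root simple types other than "text" are not mapped.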
typedef CSphVector< std::pair < nljson::json_pointer, CSphString > > ComplexFields_t;
static void AddComplexField ( const char * sFieldName, ComplexFields_t & dFields )
{
assert ( sFieldName );
const char * sDot = strchr ( sFieldName, '.' );
if ( !sDot )
return;
std::string sName = sFieldName;
ReplaceSubstring ( sName, ".", "/" );
sName = "/" + sName;
auto & tFieldTrait = dFields.Add ();
tFieldTrait.first = nljson::json_pointer ( sName );
tFieldTrait.second = sFieldName;
}
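// Illustration (hypothetical field name): "user.name" is registered as the JSON
// pointer "/user/name" paired with the original dotted name, so the nested value
// can later be pulled from the source object and stored under the flattened
// column; names without a dot are ignored.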
static void CreateKbnTable ( CreateTableSettings_t & tOpts, const nljson & tTbl, ComplexFields_t & dFields )
{
NameValueStr_t & tIdxType = tOpts.m_dOpts.Add();
tIdxType.m_sName = "type";
tIdxType.m_sValue = "rt";
// needed for the 'exists' bool query
NameValueStr_t & tIdxFL = tOpts.m_dOpts.Add();
tIdxFL.m_sName = "index_field_lengths";
tIdxFL.m_sValue = "1";
CSphColumnInfo & tRaw = tOpts.m_dFields.Add();
tRaw.m_sName = "_raw";
tRaw.m_uFieldFlags = CSphColumnInfo::FIELD_STORED;
CSphColumnInfo & tMissedExists = tOpts.m_dFields.Add();
tMissedExists.m_sName = "missed_exists";
tMissedExists.m_uFieldFlags = CSphColumnInfo::FIELD_INDEXED;
CreateTableAttr_t & tId = tOpts.m_dAttrs.Add();
tId.m_tAttr.m_sName = "_id";
tId.m_tAttr.m_eAttrType = SPH_ATTR_STRING;
CreateTableAttr_t & tVersion = tOpts.m_dAttrs.Add();
tVersion.m_tAttr.m_sName = "_version";
tVersion.m_tAttr.m_eAttrType = SPH_ATTR_INTEGER;
const nljson & tProps = tTbl["mappings"]["properties"];
if ( tProps.is_object() )
{
CSphString sName;
CreateKbnTable ( sName, true, tProps, tOpts );
}
for ( const CSphColumnInfo & tField : tOpts.m_dFields )
AddComplexField ( tField.m_sName.cstr(), dFields );
}
int64_t GetDocID ( const char * sID )
{
uint64_t uDocid = 0;
if ( !sID )
return uDocid;
const char * p = sID;
while ( sphIsInteger ( *p ) )
p++;
// the id could be an all-digit document_id, or a string that must be hashed (folded) into a number
if ( !*p )
{
uDocid = strtoll ( sID, NULL, 10 );
} else
{
uDocid = sphFNV64 ( sID );
}
return ( uDocid & INT64_MAX );
}
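// Illustration (hypothetical ids): GetDocID("123") parses the digits directly
// and returns 123, while GetDocID("kibana-settings") is not all-numeric, so it
// is hashed with sphFNV64 and masked with INT64_MAX to keep the docid positive.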
static int GetVersion ( const nljson & tDoc )
{
int iVer = 1;
if ( tDoc.contains ( "_version" ) )
iVer = tDoc["_version"].get<int>();
return iVer;
}
static bool InsertDoc ( const SqlStmt_t & tStmt, CSphString & sError );
static bool InsertDoc ( Str_t sSrc, bool bReplace, CSphString & sError )
{
DocID_t tTmpID;
SqlStmt_t tStmt;
if ( !sphParseJsonInsert ( sSrc.first, tStmt, tTmpID, false, sError ) )
return false;
assert ( tTmpID>=0 );
tStmt.m_eStmt = ( bReplace ? STMT_REPLACE : STMT_INSERT );
return InsertDoc ( tStmt, sError );
}
static bool InsertDoc ( const SqlStmt_t & tStmt, CSphString & sError )
{
std::unique_ptr<StmtErrorReporter_i> pReporter ( CreateHttpErrorReporter() );
sphHandleMysqlInsert ( *pReporter.get(), tStmt );
if ( pReporter->IsError() )
{
sError = pReporter->GetError();
return false;
}
return true;
}
static bool InsertDoc ( const CSphString & sIndex, const ComplexFields_t & dFields, const nljson & tSrc, bool bReplace, const char * sId, int iVersion, CSphString & sError )
{
nljson tVal;
tVal["table"] = sIndex.cstr();
tVal["id"] = GetDocID(sId);
tVal["doc"] = tSrc;
tVal["doc"]["_id"] = sId;
tVal["doc"]["_version"] = iVersion;
tVal["doc"]["_raw"] = tSrc;
// add fields from objects
for ( const auto & tField : dFields )
{
if ( tSrc.contains ( tField.first ) )
tVal["doc"][tField.second.cstr()] = tSrc[tField.first].dump().c_str();
}
std::string sSrc = tVal.dump();
return InsertDoc ( FromStd ( sSrc ), bReplace, sError );
}
static void InsertIntoKbnTable ( const CSphString & sIndex, const nljson & tTbl, const ComplexFields_t & dFields )
{
CSphString sError;
[[maybe_unused]] int iDocs = 0;
[[maybe_unused]] int iFailed = 0;
const nljson & tHits = tTbl["hits"];
for ( auto & tDoc : tHits.items() )
{
if ( !tDoc.value().is_object() )
continue;
iDocs++;
const CSphString sRefDocID ( tDoc.value()["_id"].get<std::string>().c_str() );
const nljson & tSrc = tDoc.value()["_source"];
if ( !InsertDoc ( sIndex, dFields, tSrc, false, sRefDocID.cstr(), GetVersion ( tDoc.value() ), sError ) )
{
iFailed++;
CompatWarning ( "doc '%s', error: %s", sRefDocID.cstr(), sError.cstr() );
continue;
}
}
//sphInfo ( "kibana table '%s' docs: inserted %d, failed %d", sIndex.cstr(), (int)iDocs-iFailed, iFailed ); // !COMMIT
}
static void CreateAliases()
{
ScRL_t tLockTbl ( g_tLockKbnTable );
for ( auto & tTbl : g_tKbnTable.items() )
{
if ( !tTbl.value().is_object() || !tTbl.value().contains( "aliases" ) )
continue;
CSphString sName = tTbl.key().c_str();
ScWL_t tLock ( g_tLockAlias );
for ( auto & tAlias : tTbl.value()["aliases"].items() )
{
CSphString sAlias = tAlias.key().c_str();
g_hAlias.Add ( sName, sAlias );
}
}
COMPATINFO << "created " << g_hAlias.GetLength() << " aliases, tables " << g_tKbnTable.size();
}
static void CreateKbnIndexes()
{
ScRL_t tLockTbl ( g_tLockKbnTable );
for ( auto & tTbl : g_tKbnTable.items() )
{
if ( !tTbl.value().is_object() )
continue;
CSphString sName = tTbl.key().c_str();
ComplexFields_t dFields;
if ( tTbl.value().contains( "mappings" ) )
{
StrVec_t dWarnings;
CSphString sError;
if ( !DropIndexInt ( sName, true, sError ) )
CompatWarning ( "%s", sError.cstr() );
CreateTableSettings_t tOpts;
CreateKbnTable ( tOpts, tTbl.value(), dFields );
if ( !CreateNewIndexConfigless ( sName, tOpts, dWarnings, sError ) )
CompatWarning ( "%s", sError.cstr() );
for ( const CSphString & sWarn : dWarnings )
CompatWarning ( "%s", sWarn.cstr() );
COMPATINFO << "created kibana table '" << sName.cstr() << "'";
}
if ( tTbl.value().contains( "hits" ) )
{
InsertIntoKbnTable ( sName, tTbl.value(), dFields );
tTbl.value().erase ( "hits" );
}
}
}
static void CreateScripts();
static void CatColumnsSetup();
void SetupCompatHttp()
{
if ( !IsLogManagementEnabled() )
return;
Threads::CallCoroutine ( [] {
CreateAliases();
CreateScripts();
CatColumnsSetup();
} );
}
void LoadCompatHttp ( const char * sData )
{
nljson tRaw = nljson::parse ( sData, nullptr, false );
int iLoadedItems = 0;
if ( tRaw.contains ( g_tConfigTables ) )
{
ScWL_t tLock ( g_tLockKbnTable );
g_tKbnTable = tRaw[g_tConfigTables];
iLoadedItems = (int)g_tKbnTable.size();
}
if ( tRaw.contains ( g_tMode ) )
g_bEnabled = tRaw[g_tMode].get<bool>();
COMPATINFO << "load compat http complete, loaded " << iLoadedItems << " items, mode " << g_bEnabled;
}
void SaveCompatHttp ( JsonEscapedBuilder & tOut )
{
if ( IsLogManagementEnabled() )
{
JsonObj_c tRaw ( false );
{
ScRL_t tLockTbl ( g_tLockKbnTable );
if ( !g_tKbnTable.size() )
return;
{
JsonObj_c tTable ( g_tKbnTable.dump().c_str() );
tRaw.AddItem ( g_tConfigTables.back().c_str(), tTable );
}
tRaw.AddBool ( g_tMode.back().c_str(), IsLogManagementEnabled() );
}
tOut.Named ( g_tConfigTables.parent_pointer().back().c_str() );
tOut.Appendf ( "%s", tRaw.AsString().cstr() );
}
}
static void DumpHttp ( int iReqType, const CSphString & sURL, Str_t sBody, const VecTraits_T<BYTE> & dResult )
{
if ( !LOG_LEVEL_COMPAT )
return;
JsonEscapedBuilder sReq;
sReq += R"({"request": {)";
sReq.Appendf ( R"( "method": "%s")", http_method_str ( (http_method)iReqType ) );
sReq.Appendf ( R"(, "url": "%s")", sURL.cstr() );
if ( !IsEmpty ( sBody ) )
{
sReq.StartBlock ( nullptr, R"(, "postData": { "text": )", " }" );
sReq.AppendEscaped ( sBody.first, EscBld::eEscape );
sReq.FinishBlock ( false );
}
sReq += "}";
if ( !dResult.IsEmpty() )
{
const BYTE * pReply = (const BYTE *)memchr ( dResult.Begin(), '{', dResult.GetLength() );
if ( pReply )
{
int iLen = dResult.GetLength() - ( pReply - dResult.Begin() );
sReq.StartBlock ( nullptr, R"( , "reply": )", nullptr );
sReq.AppendEscaped ( (const char *)pReply, EscBld::eNone, iLen );
sReq.FinishBlock ( false );
}
}
sReq += "}";
CompatWarning ( "--->\n%s\n<---", sReq.cstr() );
}
void DumpHttp ( int iReqType, const CSphString & sURL, Str_t sBody )
{
DumpHttp ( iReqType, sURL, sBody, VecTraits_T<BYTE>() );
}
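// splits an URL path into percent-decoded non-empty components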
static StrVec_t SplitURL ( const CSphString & sURL )
{
StrVec_t dParts;
sph::Split ( sURL.cstr(), sURL.Length(), "/", [&dParts, &sURL] ( const char * sPart, int iLen )
{
if ( !iLen )
return;
CSphString sBuf;
sBuf.SetBinary ( sPart, iLen );
Str_t tItem = FromStr ( sBuf );
UriPercentReplace ( tItem );
dParts.Add ( tItem );
}
);
return dParts;
}
static bool IsEq ( const HttpOptionsHash_t & hOpts, const CSphString & sName, const CSphString & sVal )
{
const CSphString * pVal = hOpts ( sName );
return ( pVal && *pVal==sVal );
}
void DumpNLJson ( const nljson & tVal, int iTabs )
{
const bool bIsObject = tVal.is_object();
for ( auto it=tVal.cbegin(); it!=tVal.cend(); it++ )
{
if ( bIsObject )
{
for ( int iT=0; iT<iTabs; iT++ )
std::cout << "\t";
std::cout << "key = " << it.key() << " : " << "\n";
}
for ( int iT=0; iT<iTabs; iT++ )
std::cout << "\t";
std::cout << it.value() << "\n";
if ( it->is_object() || it->is_array() )
{
DumpNLJson ( it.value(), iTabs+1 );
}
}
}
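// collects JSON pointers to query nodes that reference attributes rather than
// full-text fields and rewrites them: match/term/terms over attributes become
// 'equals', 'exists' over an object field expands into a bool/should over its
// subfields, and 'exists' over an unknown field turns into a never-matching term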
class ColumnFixupTrait_t
{
public:
ColumnFixupTrait_t ( const CSphSchema & tSchema )
{
for ( int i=0; i<tSchema.GetFieldsCount(); i++ )
{
const CSphString & sName = tSchema.GetField ( i ).m_sName;
const char * sDot = strchr ( sName.cstr(), '.' );
if ( !sDot )
continue;
CSphString sParentName;
sParentName.SetBinary ( sName.cstr(), sDot-sName.cstr() );
// the schema might already contain a field with this name
if ( tSchema.GetFieldIndex ( sParentName.cstr() )!=-1 )
continue;
StrVec_t & tFields = m_hParentFields.AddUnique ( sParentName );
tFields.Add ( sName );
}
}
SmallStringHash_T < StrVec_t > m_hParentFields;
CSphVector<nljson::json_pointer> m_dAttr;
CSphVector<nljson::json_pointer> m_dFieldsExists;
CSphVector<nljson::json_pointer> m_dMissedExists;
void AddColumn ( const CSphSchema & tSchema, const char * sName, const nljson & tItem, const nljson::json_pointer & tPath )
{
AddReplaceColumn ( tSchema, sName, tItem, tPath );
AddExistsParentField ( tSchema, sName, tItem, tPath );
}
void ReplaceColumn ( nljson & tFullQuery )
{
ReplaceAttrs ( tFullQuery );
ReplaceFields ( tFullQuery );
ReplaceMissedExists ( tFullQuery );
}
private:
void AddReplaceColumn ( const CSphSchema & tSchema, const char * sName, const nljson & tItem, const nljson::json_pointer & tPath )
{
bool bMatch = strcmp ( sName, "match" )==0 || strcmp ( sName, "term" )==0;
bool bTerms = strcmp ( sName, "terms" )==0;
bool bPhrase = strcmp ( sName, "match_phrase" )==0;
bool bAdd = false;
if ( bMatch || bPhrase )
{
assert ( tItem.is_object() && tItem.size()==1 );
bAdd = ( tSchema.GetFieldIndex ( tItem.cbegin().key().c_str() )==-1 );
} else if ( bTerms )
{
assert ( tItem.is_object() );
bAdd = ( tSchema.GetFieldIndex ( tItem.cbegin().key().c_str() )==-1 );
}
if ( bAdd )
m_dAttr.Add ( tPath );
}
void AddExistsParentField ( const CSphSchema & tSchema, const char * sName, const nljson & tItem, const nljson::json_pointer & tPath )
{
bool bExists = strcmp ( sName, "exists" )==0;
if ( bExists )
{
assert ( tItem.is_object() && tItem.size()==1 && tItem.cbegin().key()=="field" );
CSphString sFieldName = tItem.cbegin().value().get<std::string>().c_str();
if ( tSchema.GetFieldIndex ( sFieldName.cstr() )!=-1 )
return;
if ( m_hParentFields.Exists ( sFieldName ) )
m_dFieldsExists.Add ( tPath );
else
m_dMissedExists.Add ( tPath );
}
}
void ReplaceAttrs ( nljson & tFullQuery )
{
if ( !m_dAttr.GetLength() )
return;
//std::cout << "attrs to replace: " << dAttr.GetLength() << "\n";
for ( const auto & tIt : m_dAttr )
{
//std::cout << tIt << " : " << tFullQuery[tIt] << "\n";
nljson::json_pointer tParent = tIt.parent_pointer();
nljson::json_pointer tNew = tParent;
tNew /= "equals";
tFullQuery[tNew] = tFullQuery[tIt];
tFullQuery[tParent].erase ( tIt.back() );
if ( tFullQuery[tNew].cbegin().value().count ( "query" )==1 )
{
nljson tEq = R"({})"_json;
tEq[tFullQuery[tNew].cbegin().key()] = tFullQuery[tNew].cbegin().value()["query"];
tFullQuery[tNew] = tEq;
}
//std::cout << tParent << " : " << tFullQuery[tParent] << "\n";
}
//std::cout << tFullQuery << "\n";
}
void ReplaceFields ( nljson & tFullQuery )
{
if ( !m_dFieldsExists.GetLength() )
return;
//std::cout << "fields to replace: " << m_dFieldsExists.GetLength() << "\n";
for ( const auto & tIt : m_dFieldsExists )
{
//std::cout << tIt << " : " << tFullQuery[tIt] << "\n";
const nljson & tRefExists = tFullQuery[tIt];
assert ( tRefExists.is_object() && tRefExists.size()==1 && tRefExists.cbegin().key()=="field" );
CSphString sFieldName = tRefExists.cbegin().value().get<std::string>().c_str();
const StrVec_t & dFields = m_hParentFields[sFieldName];
nljson tExistVec = R"([])"_json;
for ( const auto & tFieldIt : dFields )
{
nljson tNewField;
tNewField["exists"] = { { "field", tFieldIt.cstr() } };
tExistVec.push_back ( tNewField );
}
nljson tShouldObj;
tShouldObj["should"] = tExistVec;
nljson tBoolObj;
tBoolObj["bool"] = tShouldObj;
nljson::json_pointer tParent = tIt.parent_pointer();
tFullQuery[tParent] = tBoolObj;
//std::cout << tParent << " : " << tFullQuery[tParent] << "\n";
}
//std::cout << tFullQuery << "\n";
}
void ReplaceMissedExists ( nljson & tFullQuery )
{
if ( !m_dMissedExists.GetLength() )
return;
// std::cout << "missed exists: " << m_dMissedExists.GetLength() << "\n";
for ( const auto & tIt : m_dMissedExists )
{
//std::cout << tIt << " : " << tFullQuery[tIt] << "\n";
nljson::json_pointer tParent = tIt.parent_pointer();
tFullQuery[tParent] = R"({ "term": { "missed_exists": "none" } })"_json;
//std::cout << tFullQuery[tParent] << "\n";
}
//std::cout << tFullQuery << "\n";
}
};
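// depth-first walk over the query tree that feeds every object node to the fixup collector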
static void FixupFilterFields ( const CSphSchema & tSchema, const nljson::json_pointer & tParent, const nljson & tVal, ColumnFixupTrait_t & tArg )
{
const bool bIsObject = tVal.is_object();
const bool bIsArray = tVal.is_array();
int iItem = 0;
for ( auto tIt=tVal.cbegin(); tIt!=tVal.cend(); tIt++ )
{
if ( tIt->is_object() || tIt->is_array() )
{
nljson::json_pointer tPath = tParent;
if ( bIsObject )
tPath /= tIt.key();
if ( bIsArray )
tPath /= iItem;
if ( bIsObject )
tArg.AddColumn ( tSchema, tIt.key().c_str(), tIt.value(), tPath );
FixupFilterFields ( tSchema, tPath, tIt.value(), tArg );
}
iItem++;
}
}
static void FixupFilterMatchPhrase ( const CSphSchema & tSchema, nljson & tVal )
{
const auto & tAttrObj = tVal.begin();
if ( !tAttrObj.value().is_object() )
return;
const auto & tAttrType = tAttrObj.value().begin();
if ( !tAttrType.value().is_object() || !tAttrType.value().contains( "query" ) )
return;
if ( tSchema.GetField ( tAttrType.key().c_str() ) )
return;
nljson tNew;
tNew["equals"][tAttrType.key()] = tAttrType.value()["query"];
tVal = tNew;
}
static void FixupFilterType ( const CSphSchema & tSchema, nljson & tVal )
{
auto tAttrObj = tVal.begin();
const CSphColumnInfo * pCol = tSchema.GetAttr ( tAttrObj.key().c_str() );
if ( !pCol )
return;
// FIXME!!! implement full N to M mappings
if ( tAttrObj.value().is_string() && pCol->m_eAttrType!=SPH_ATTR_STRING )
{
CSphVariant tConv ( tAttrObj.value().get<std::string>().c_str() );
switch ( pCol->m_eAttrType )
{
case SPH_ATTR_TIMESTAMP:
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
tAttrObj.value() = tConv.intval();
break;
case SPH_ATTR_FLOAT:
tAttrObj.value() = tConv.floatval();
break;
case SPH_ATTR_BIGINT:
tAttrObj.value() = tConv.int64val();
break;
default: break; // implement full N to M mappings
}
}
}
static void FixupValues ( const nljson::json_pointer & tQueryFilter, const CSphSchema & tSchema, nljson & tFullQuery )
{
if ( !tFullQuery.contains ( tQueryFilter ) || !tFullQuery[tQueryFilter].size() )
return;
for ( auto & tVal : tFullQuery[tQueryFilter].items() )
{
if ( !tVal.value().is_object() )
continue;
if ( tVal.value().cbegin().key()=="match_phrase" )
{
FixupFilterMatchPhrase ( tSchema, tVal.value() );
continue;
}
if ( tVal.value().cbegin().key()=="equals" )
{
FixupFilterType ( tSchema, tVal.value().begin().value() );
continue;
}
}
}
static void FixupFilter ( const StrVec_t & dIndexes, nljson & tFullQuery )
{
CSphSchema tSchema;
for ( const CSphString & sName : dIndexes )
{
auto tIndex ( GetServed ( sName ) );
if ( tIndex )
{
tSchema = RIdx_c( tIndex )->GetMatchSchema();
break;
}
}
if ( !tSchema.GetAttrsCount() )
return;
if ( !tFullQuery.contains ( "query" ) )
return;
nljson & tQuery = tFullQuery["query"];
ColumnFixupTrait_t tArg ( tSchema );
nljson::json_pointer tParent;
FixupFilterFields ( tSchema, tParent, tQuery, tArg );
tArg.ReplaceColumn ( tQuery );
// FIXME!!! move into FixupFilterFields
nljson::json_pointer tQueryFilter ( "/query/bool/filter" );
FixupValues ( tQueryFilter, tSchema, tFullQuery );
nljson::json_pointer tQueryMustNot ( "/query/bool/must_not" );
FixupValues ( tQueryMustNot, tSchema, tFullQuery );
}
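// generic depth-first traversal that invokes the callback on every leaf value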
template<class T_FN>
static void RecursiveIterate ( const nljson::json_pointer & tParent, const nljson & tVal, T_FN fn )
{
const bool bIsObject = tVal.is_object();
const bool bIsArray = tVal.is_array();
int iItem = 0;
for ( auto tIt=tVal.cbegin(); tIt!=tVal.cend(); tIt++ )
{
if ( tIt->is_structured() )
{
nljson::json_pointer tPath = tParent;
if ( bIsObject )
tPath /= tIt.key();
if ( bIsArray )
tPath /= iItem;
RecursiveIterate ( tPath, tIt.value(), fn );
} else
{
fn ( tIt, bIsObject, bIsArray, tParent );
}
iItem++;
}
}
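// a single target index counts as a kibana table if it is either a known kibana table or an alias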
static bool IsKibanTable ( const VecTraits_T<CSphString> & dIndexes )
{
if ( dIndexes.GetLength()!=1 )
return false;
bool bKbnTable = false;
{
ScRL_t tLockTbl ( g_tLockKbnTable );
bKbnTable = ( dIndexes.any_of ( [] ( const CSphString & tVal ) { return g_tKbnTable.contains ( tVal.cstr() ); } ) );
}
if ( !bKbnTable )
{
ScRL_t tLock ( g_tLockAlias );
bKbnTable = ( g_hAlias.Exists ( dIndexes[0] ) );
}
return bKbnTable;
}
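// kibana column names that contain both '-' and '.' have to be backtick-quoted
// per path component so that the query parser treats them as one identifier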
static void EscapeKibanaColumnNames ( const StrVec_t & dIndexes, nljson & tFullQuery )
{
if ( !IsKibanTable ( dIndexes ) )
return;
CSphVector<nljson::json_pointer> dEscNames;
nljson::json_pointer tRootPath;
RecursiveIterate ( tRootPath, tFullQuery, [&] ( nljson::const_iterator tIt, bool bIsObject, bool bIsArray, const nljson::json_pointer & tParent )
{
if ( !bIsObject )
return;
const std::string & sKey = tIt.key();
if ( strchr ( sKey.c_str(), '-' )!= nullptr && strchr ( sKey.c_str(), '.' )!=nullptr )
{
nljson::json_pointer tPath = tParent;
tPath /= tIt.key();
dEscNames.Add ( tPath );
}
}
);
StringBuilder_c sNewKey;
for ( const auto & tPath : dEscNames )
{
const char * sKey = tPath.back().c_str();
sNewKey.Clear();
sNewKey << '`';
while ( *sKey )
{
if ( *sKey=='.' )
sNewKey << "`.`";
else
sNewKey << *sKey;
sKey++;
}
sNewKey << '`';
nljson::json_pointer tParent = tPath.parent_pointer();
nljson::json_pointer tNewPath = tParent;
tNewPath /= sNewKey.cstr();
nljson tVal = tFullQuery[tPath];
tFullQuery[tNewPath] = tVal;
tFullQuery[tParent].erase ( tPath.back() );
}
}
static void FixupKibana ( const StrVec_t & dIndexes, nljson & tFullQuery )
{
// kibana tables query fixup
nljson::json_pointer tSortScript ( "/sort/_script" );
if ( tFullQuery.contains ( tSortScript ) )
{
tFullQuery.erase ( "sort" );
if ( dIndexes.GetLength() )
{
bool bKbnTable = false;
{
ScRL_t tLockTbl ( g_tLockKbnTable );
bKbnTable = ( dIndexes.any_of ( [] ( const CSphString & tVal ) { return g_tKbnTable.contains ( tVal.cstr() ); } ) );
}
if ( !bKbnTable )
{
ScRL_t tLock ( g_tLockAlias );
bKbnTable = ( g_hAlias.Exists ( dIndexes[0] ) );
}
if ( !bKbnTable )
CompatWarning ( "removed sort[_script] property at query to not kibana index '%s'", dIndexes[0].cstr() );
}
}
}
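// expands a comma-separated index list (with aliases and wildcards) into the
// matching local indexes and also returns them as a single comma-separated string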
static StrVec_t ExpandIndexes ( const CSphString & sSrcIndexes, CSphString & sResIndex )
{
StrVec_t dLocalIndexes;
StrVec_t dNames = sphSplit ( sSrcIndexes.cstr(), "," );
for ( const CSphString & sName : dNames )
{
if ( !HasWildcard ( sName.cstr() ) )
{
// look for alias
const CSphString * pAliasIndex = g_hAlias ( sName );
const CSphString * pIndexName = ( pAliasIndex ? pAliasIndex : &sName );
// then look for local index
auto pServed ( GetServed ( *pIndexName ) );
if ( pServed )
dLocalIndexes.Add ( *pIndexName );
} else
{
StrVec_t dIndexName;
// look for alias
// scope for alias hash lock
{
ScRL_t tLock ( g_tLockAlias );
for ( const auto & tAliasIt : g_hAlias )
{
const CSphString & sAliasIndex = tAliasIt.second;
if ( !sphWildcardMatch ( sAliasIndex.cstr(), sName.cstr() ) )
continue;
dIndexName.Add ( sAliasIndex );
}
}
// look for local indexes from alias
if ( dIndexName.GetLength() )
{
for ( const CSphString & sIndexName : dIndexName )
{
auto pServed ( GetServed ( sIndexName ) );
if ( pServed )
dLocalIndexes.Add ( sIndexName );
}
}
// look for local indexes from wildcards
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocal )
{
if ( !tIt.second )
continue;
if ( !sphWildcardMatch ( tIt.first.cstr(), sName.cstr() ) )
continue;
dLocalIndexes.Add ( tIt.first );
}
}
}
// remove duplicates
dLocalIndexes.Uniq();
StringBuilder_c sIndexes ( "," );
dLocalIndexes.Apply ( [&sIndexes] ( const CSphString & sName ) { sIndexes += sName.cstr(); } );
sResIndex = sIndexes.cstr();
return dLocalIndexes;
}
static bool Ends ( const char * sSrc, const char * sSuffix )
{
if ( !sSrc || !sSuffix )
return false;
auto iVal = (int)strlen ( sSrc );
auto iSuffix = (int)strlen ( sSuffix );
if ( iVal < iSuffix )
return false;
return strncmp ( sSrc + iVal - iSuffix, sSuffix, iSuffix ) == 0;
}
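// aggregations over 'name.keyword' fall back to the plain 'name' string attribute when the schema has no such column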
static const char g_sReplaceKw[] = ".keyword";
static void FixupAggs ( const StrVec_t & dIndexes, nljson & tFullQuery )
{
nljson::json_pointer tPathAggs ( "/aggs" );
if ( !tFullQuery.contains ( tPathAggs ) )
return;
CSphVector<nljson::json_pointer> dReplace;
for ( const auto & tAggItem : tFullQuery[tPathAggs].items() )
{
if ( !tAggItem.value().size() || !tAggItem.value().front().size() )
continue;
const auto & tVal = tAggItem.value().front().front();
if ( !tVal.is_string() )
continue;
const char * sVal = tVal.get_ptr<const nljson::string_t *>()->c_str();
if ( !Ends ( sVal, g_sReplaceKw ) )
continue;
nljson::json_pointer tPath = tPathAggs / tAggItem.key() / tAggItem.value().cbegin().key() / tAggItem.value().front().cbegin().key();
dReplace.Add ( tPath );
}
if ( dReplace.IsEmpty() )
return;
CSphSchema tSchema;
for ( const CSphString & sName : dIndexes )
{
auto tIndex ( GetServed ( sName ) );
if ( tIndex )
{
tSchema = RIdx_c( tIndex )->GetMatchSchema();
break;
}
}
if ( !tSchema.GetAttrsCount() )
return;
for ( const auto & tPath : dReplace )
{
const char * sAttr = tFullQuery[tPath].get_ptr<const nljson::string_t *>()->c_str();
if ( tSchema.GetAttr ( sAttr ) )
continue;
auto iLen = (int)strlen ( sAttr );
CSphString sNameReplace;
sNameReplace.SetBinary ( sAttr, iLen - (int)sizeof(g_sReplaceKw) + 1 );
const CSphColumnInfo * pCol = tSchema.GetAttr ( sNameReplace.cstr() );
if ( pCol && pCol->m_eAttrType==SPH_ATTR_STRING )
tFullQuery[tPath] = sNameReplace.cstr();
}
}
static CSphString g_sEmptySearch = R"(
{
"took": 0,
"timed_out": false,
"_shards": {
"total": 0,
"successful": 0,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 0,
"relation": "eq"
},
"max_score": 0,
"hits": []
}
}
)";
static bool DoSearch ( const CSphString & sDefaultIndex, nljson & tReq, const CSphString & sURL, CSphString & sRes )
{
// expand index(es) to index list
CSphString sIndex = sDefaultIndex;
if ( tReq.contains ( "table" ) )
sIndex = tReq["table"].get<std::string>().c_str();
else if ( tReq.contains ( "index" ) )
sIndex = tReq["index"].get<std::string>().c_str();
CSphString sExpandedIndex;
StrVec_t dIndexes = ExpandIndexes ( sIndex, sExpandedIndex );
if ( !dIndexes.GetLength() )
{
sRes = R"(
{
"took": 0,
"timed_out": false,
"_shards": {
"total": 0,
"successful": 0,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 0,
"relation": "eq"
},
"max_score": 0,
"hits": []
}
}
)";
return true;
}
tReq["table"] = sExpandedIndex.cstr();
EscapeKibanaColumnNames ( dIndexes, tReq );
FixupKibana ( dIndexes, tReq );
FixupFilter ( dIndexes, tReq );
FixupAggs ( dIndexes, tReq );
ParsedJsonQuery_t tParsedQuery;
auto& tQuery = tParsedQuery.m_tQuery;
tQuery.m_eQueryType = QUERY_JSON;
tQuery.m_sRawQuery = tReq.dump().c_str();
tParsedQuery.m_bProfile = false;
JsonObj_c tMntReq = JsonObj_c ( tQuery.m_sRawQuery.cstr() );
if ( !sphParseJsonQuery ( tMntReq, tParsedQuery ) )
{
const char * sError = TlsMsg::szError();
CompatWarning ( "%s at '%s' body '%s'", sError, sURL.cstr(), tQuery.m_sRawQuery.cstr() );
sRes = JsonEncodeResultError ( sError, GetErrorTypeName ( HttpErrorType_e::Parse ), 400 );
return false;
}
if ( !tParsedQuery.m_sWarning.IsEmpty() )
CompatWarning ( "%s", tParsedQuery.m_sWarning.cstr() );
std::unique_ptr<PubSearchHandler_c> tHandler ( CreateMsearchHandler ( sphCreateJsonQueryParser(), QUERY_JSON, tQuery ) );
tHandler->RunQueries();
CSphFixedVector<AggrResult_t *> dAggsRes ( 1 + tQuery.m_dAggs.GetLength() );
dAggsRes[0] = tHandler->GetResult ( 0 );
ARRAY_FOREACH ( i, tQuery.m_dAggs )
dAggsRes[i+1] = tHandler->GetResult ( i+1 );
sRes = sphEncodeResultJson ( dAggsRes, tQuery, nullptr, ResultSetFormat_e::ES );
bool bOk = true;
// want to see at log url and query for search error
for ( const AggrResult_t * pAggr : dAggsRes )
{
if ( !pAggr->m_iSuccesses )
{
CompatWarning ( "'%s' at '%s' body '%s'", pAggr->m_sError.cstr(), sURL.cstr(), tQuery.m_sRawQuery.cstr() );
bOk = false;
TlsMsg::Err ( pAggr->m_sError );
}
}
return bOk;
}
bool HttpCompatHandler_c::ProcessMSearch ()
{
if ( IsEmpty ( GetBody() ) )
{
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
if ( !Ends ( GetBody(), "\n" ) )
{
ReportError ( "The msearch request must be terminated by a newline [\n]", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400 );
return false;
}
int64_t tmStarted = sphMicroTimer();
CSphString sWarning;
//const HttpOptionsHash_t & hOpts = tParser.GetOptions();
CSphString sDefaultIndex;
if ( GetUrlParts().GetLength()>1 )
sDefaultIndex = GetUrlParts()[0];
//bool bRestTotalHitsInt = IsTrue ( hOpts, "rest_total_hits_as_int" );
bool bParsedOk = true;
CSphVector<nljson> tSourceReq;
int iSourceLine = 0;
SplitNdJson ( GetBody(),
[&] ( const char * sLine, int iLen )
{
nljson tItem = nljson::parse ( sLine, nullptr, false );
if ( tItem.is_discarded() )
bParsedOk = false;
if ( ( iSourceLine%2 )==0 )
{
tSourceReq.Add ( tItem );
} else
{
tSourceReq.Last().update ( tItem );
}
iSourceLine++;
}
);
if ( iSourceLine<2 || !bParsedOk )
{
ReportError ( "Validation Failed: 1: no requests added;", HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_400 );
return false;
}
CSphFixedVector<CSphString> dRes ( tSourceReq.GetLength() );
for ( int i=0; i<dRes.GetLength(); i++ )
{
nljson & tReq = tSourceReq[i];
DoSearch ( sDefaultIndex, tReq, GetFullURL(), dRes[i] );
}
int64_t tmTook = sphMicroTimer() - tmStarted;
JsonEscapedBuilder tReply;
tReply.Appendf ( R"({"took":%d,"responses": [)", (int)( tmTook/1000 ));
ARRAY_FOREACH ( i, dRes )
{
if ( i )
tReply += ",\n";
else
tReply += "\n";
tReply += dRes[i].cstr();
}
tReply += "\n";
tReply += "]\n";
tReply += "}";
BuildReply ( tReply, EHTTP_STATUS::_200 );
return true;
}
typedef CSphVector< std::pair < CSphString, int > > DocIdVer_t;
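// fetches (_id, _version) pairs from the given index, optionally filtered down to a single string id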
static bool GetDocIds ( const char * sIndexName, const char * sFilterID, DocIdVer_t & dIds, CSphString & sError )
{
assert ( sIndexName );
const char * sIdName = "_id";
const char * sVerName = "_version";
JsonQuery_c tQuery;
CSphQueryItem & tItem = tQuery.m_dItems.Add();
tItem.m_sAlias = sIdName;
tItem.m_sExpr = sIdName;
CSphQueryItem & tVer = tQuery.m_dItems.Add();
tVer.m_sAlias = sVerName;
tVer.m_sExpr = sVerName;
tQuery.m_sIndexes = sIndexName;
// need all matching documents, not just the default top matches (capped at 1000)
tQuery.m_iLimit = 1000;
tQuery.m_iMaxMatches = 1000;
if ( sFilterID )
{
CSphFilterSettings & tFilter = tQuery.m_dFilters.Add();
tFilter.m_sAttrName = "_id";
tFilter.m_eType = SPH_FILTER_STRING;
tFilter.m_dStrings.Add ( sFilterID );
tFilter.m_bExclude = false;
}
std::unique_ptr<PubSearchHandler_c> tHandler ( CreateMsearchHandler ( sphCreateJsonQueryParser(), QUERY_JSON, tQuery ) );
tHandler->RunQueries();
const AggrResult_t * pRes = tHandler->GetResult ( 0 );
if ( !pRes )
{
sError.SetSprintf ( "invalid search for index '%s'", sIndexName );
return false;
}
if ( !pRes->m_iSuccesses )
{
sError.SetSprintf ( "%s", pRes->m_sError.cstr() );
return false;
}
if ( !pRes->m_sWarning.IsEmpty() )
CompatWarning ( "%s", pRes->m_sWarning.cstr() );
const ISphSchema & tSchema = pRes->m_tSchema;
const CSphColumnInfo * pColId = tSchema.GetAttr ( sIdName );
const CSphColumnInfo * pColVer = tSchema.GetAttr ( sVerName );
if ( !pColId || !pColVer)
{
sError.SetSprintf ( "invalid attrs count %d, id=%d, version=%d, index '%s'", tSchema.GetAttrsCount(), ( pColId ? 1 : 0 ), ( pColVer ? 1 : 0 ), sIndexName );
return false;
}
if ( pColId->m_eAttrType!=SPH_ATTR_STRINGPTR )
{
sError.SetSprintf ( "invalid attr type '%s', index '%s'", AttrType2Str ( pColVer->m_eAttrType ), sIndexName );
return false;
}
if ( pColVer->m_eAttrType!=SPH_ATTR_INTEGER )
{
sError.SetSprintf ( "invalid attr type '%s', index '%s'", AttrType2Str ( pColId->m_eAttrType ), sIndexName );
return false;
}
const CSphAttrLocator & tLocId = pColId->m_tLocator;
const CSphAttrLocator & tLocVer = pColVer->m_tLocator;
auto dMatches = pRes->m_dResults.First ().m_dMatches.Slice ( pRes->m_iOffset, pRes->m_iCount );
for ( const CSphMatch & tMatch : dMatches )
{
const BYTE * pData = ( const BYTE * ) tMatch.GetAttr ( tLocId );
ByteBlob_t tStr = sphUnpackPtrAttr ( pData );
auto & tId = dIds.Add();
tId.first.SetBinary ( (const char *)tStr.first, tStr.second );
tId.second = tMatch.GetAttr ( tLocVer );
}
return true;
}
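// pulls the stored '_raw' field of the given document out of the docstore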
static bool GetIndexDoc ( const CSphIndex * pIndex, const char * sID, int64_t iSessionUID, CSphVector<BYTE> & dField, CSphString & sError )
{
assert ( pIndex );
CSphFixedVector<int> dFieldIds ( 1 );
dFieldIds[0] = pIndex->GetFieldId ( "_raw", DOCSTORE_TEXT );
if ( dFieldIds[0]==-1 )
{
sError.SetSprintf ( "unknown '_raw' stored field" );
return false;
}
DocID_t tDocid = (DocID_t)GetDocID(sID);
DocstoreDoc_t tDoc;
pIndex->GetDoc ( tDoc, tDocid, &dFieldIds, iSessionUID, false );
if ( !tDoc.m_dFields.GetLength() )
return true;
assert ( tDoc.m_dFields.GetLength()==1 );
tDoc.m_dFields[0].SwapData ( dField );
return true;
}
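// resolves a string id into the raw document body and its version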
static void TableGetDoc ( const CSphString & sId, const CSphIndex * pIndex, const CSphString & sIndex, CSphVector<BYTE> & dRawDoc, int & iVersion )
{
CSphString sError;
DocIdVer_t dDocVer;
if ( !GetDocIds ( sIndex.cstr(), sId.cstr(), dDocVer, sError ) )
{
CompatWarning ( "%s", sError.cstr() );
return;
}
// doc not found
if ( !dDocVer.GetLength() )
return;
if ( dDocVer.GetLength()>1 )
{
CompatWarning ( "multiple documents found for '%s'", sId.cstr() );
return;
}
iVersion = dDocVer[0].second;
DocstoreSession_c tSession;
pIndex->CreateReader ( tSession.GetUID() );
if ( !GetIndexDoc ( pIndex, sId.cstr(), tSession.GetUID(), dRawDoc, sError ) )
{
CompatWarning ( "%s", sError.cstr() );
return;
}
}
bool HttpCompatHandler_c::ProcessKbnTableDoc()
{
assert ( GetUrlParts().GetLength() );
const CSphString & sId = GetUrlParts().Last();
CSphString sIndex = GetUrlParts()[0];
{
ScRL_t tLock ( g_tLockAlias );
const CSphString * pAliasIndex = g_hAlias ( sIndex );
if ( pAliasIndex )
sIndex = *pAliasIndex;
}
auto tServed ( GetServed ( sIndex ) );
if ( !tServed )
{
CompatWarning ( "unknown kibana table %s", sIndex.cstr() );
return false;
}
RIdx_c pIndex ( tServed );
DocstoreSession_c tSession;
pIndex->CreateReader ( tSession.GetUID() );
CSphVector<BYTE> dField;
if ( !GetIndexDoc ( pIndex, sId.cstr(), tSession.GetUID(), dField, m_sError ) )
{
CompatWarning ( "%s", m_sError.cstr() );
return false;
}
DocIdVer_t dDocVer;
if ( !GetDocIds ( sIndex.cstr(), sId.cstr(), dDocVer, m_sError ) )
{
CompatWarning ( "%s", m_sError.cstr() );
return false;
}
if ( dField.GetLength() && !dDocVer.GetLength() )
{
CompatWarning ( "%s mismatch document", sId.cstr() );
return false;
}
nljson tDoc;
tDoc["_index"] = sIndex.cstr();
tDoc["_type"] = "_doc";
tDoc["_id"] = sId.cstr();
if ( !dField.GetLength() )
{
tDoc["found"] = false;
} else
{
nljson tSrc = nljson::parse ( dField.Begin() );
tDoc["_source"] = tSrc;
tDoc["_version"] = dDocVer[0].second;
tDoc["_seq_no"] = 0;
tDoc["_primary_term"] = 1;
tDoc["found"] = true;
}
JsonEscapedBuilder tReply;
tReply += tDoc.dump().c_str();
BuildReplyHead ( Str_t ( tReply ), EHTTP_STATUS::_200 );
return true;
}
static nljson ReportGetDocError ( const CSphString & sError, HttpErrorType_e eType, const CSphString & sId, const CSphString & sIndex )
{
nljson tRes = R"({})"_json;
tRes["_index"] = sIndex.cstr();
tRes["_id"] = sId.cstr();
nljson tResError = R"({})"_json;
tResError["type"] = GetErrorTypeName ( eType );
tResError["reason"] = sError.cstr();
tResError["reason"] = sIndex.cstr();
tRes["error"] = tResError;
return tRes;
}
void HttpCompatHandler_c::ProcessKbnTableMGet()
{
nljson tReq = nljson::parse ( GetBody().first );
nljson::json_pointer tDocs ( "/docs" );
nljson::json_pointer tIds ( "/ids" );
bool bCaseDocs = ( tReq.contains ( tDocs ) );
bool bCaseIds = ( !bCaseDocs && tReq.contains ( tIds ) );
if ( !bCaseDocs && !bCaseIds )
{
ReportError ( "unknown key for a START_ARRAY, expected [docs] or [ids]", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return;
}
CSphString sIndex;
if ( bCaseIds )
{
if ( GetUrlParts().GetLength()<3 )
{
ReportError ( "Validation Failed: 1: index is missing for doc 0;", HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_400 );
return;
}
sIndex = GetUrlParts()[2];
ScRL_t tLock ( g_tLockAlias );
const CSphString * pAliasIndex = g_hAlias ( sIndex );
if ( pAliasIndex )
sIndex = *pAliasIndex;
if ( sIndex.IsEmpty() )
{
m_sError.SetSprintf ( "no such index [%s]", GetUrlParts()[2].cstr() );
ReportError ( nullptr, HttpErrorType_e::IndexNotFound, EHTTP_STATUS::_400 );
return;
}
auto tIndex ( GetServed ( sIndex ) );
if ( !tIndex )
{
m_sError.SetSprintf ( "no such index [%s]", GetUrlParts()[2].cstr() );
ReportError ( nullptr, HttpErrorType_e::IndexNotFound, EHTTP_STATUS::_400 );
return;
}
}
nljson tRes = R"({"docs":[]})"_json;
nljson & tResDocs = tRes[tDocs];
CSphVector<BYTE> dRawDoc;
CSphString sId;
int iDoc = 0;
nljson::json_pointer tDocId ( "/_id" );
nljson::json_pointer tDocIdx ( "/_index" );
const nljson & tReqDocs = tReq[bCaseDocs ? tDocs : tIds];
for ( const nljson & tDoc : tReqDocs )
{
if ( bCaseDocs )
{
if ( !tDoc.contains ( tDocId ) )
{
m_sError.SetSprintf ( "Validation Failed: 1: id is missing for doc %d;", iDoc );
ReportError ( nullptr, HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_400 );
return;
}
if ( !tDoc.contains ( tDocIdx ) )
{
m_sError.SetSprintf ( "Validation Failed: 1: index is missing for doc %d;", iDoc );
ReportError ( nullptr, HttpErrorType_e::ActionRequestValidation, EHTTP_STATUS::_400 );
return;
}
sId = tDoc[tDocId].get<std::string>().c_str();
sIndex = tDoc[tDocIdx].get<std::string>().c_str();
{
ScRL_t tLock ( g_tLockAlias );
const CSphString * pAliasIndex = g_hAlias ( sIndex );
if ( pAliasIndex )
sIndex = *pAliasIndex;
}
if ( sIndex.IsEmpty() )
{
CSphString sError;
sError.SetSprintf ( "no such index [%s]", sIndex.cstr() );
tResDocs.push_back ( ReportGetDocError ( sError, HttpErrorType_e::IndexNotFound, sId, sIndex ) );
continue;
}
} else
{
sId = tDoc.get<std::string>().c_str();
}
auto tIndex ( GetServed ( sIndex ) );
if ( !tIndex )
{
CSphString sError;
sError.SetSprintf ( "no such index [%s]", sIndex.cstr() );
tResDocs.push_back ( ReportGetDocError ( sError, HttpErrorType_e::IndexNotFound, sId, sIndex ) );
continue;
}
int iVersion = 0;
dRawDoc.Resize ( 0 );
TableGetDoc ( sId, RIdx_c ( tIndex ), sIndex, dRawDoc, iVersion );
nljson tResDoc = R"({"_type": "_doc"})"_json;
tResDoc["_index"] = sIndex.cstr();
tResDoc["_id"] = sId.cstr();
if ( dRawDoc.GetLength() )
{
tResDoc["found"] = true;
tResDoc["_version"] = iVersion;
tResDoc["_seq_no"] = 1;
tResDoc["_primary_term"] = 1;
tResDoc["_source"] = nljson::parse ( dRawDoc.Begin() );
} else
{
tResDoc["found"] = false;
}
tResDocs.push_back ( tResDoc );
iDoc++;
}
JsonEscapedBuilder tReply;
tReply += tRes.dump().c_str();
BuildReply ( Str_t ( tReply ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessPutTemplate()
{
assert ( GetUrlParts().GetLength() );
const CSphString & sTblName = GetUrlParts()[1];
nljson tTbl = nljson::parse ( GetBody().first );
if ( !tTbl.contains ( "order" ) )
tTbl["order"] = 0;
if ( !tTbl.contains ( "version" ) )
tTbl["version"] = 1;
{
ScWL_t tLockTbl ( g_tLockKbnTable );
g_tKbnTable["templates"][sTblName.cstr()] = tTbl;
}
const char * sRes = "{\"acknowledged\":true}";
BuildReply ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessIgnored()
{
const char * sRes = "{\"took\":0, \"ignored\":true, \"errors\":false}";
BuildReply ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
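// recursively applies one dot-separated filter_path expression ('*' matches any
// key) and copies the matched subtree into tPart; returns false when nothing matched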
static bool ProcessFilterPath ( const nljson & tNode, const VecTraits_T<CSphString> & dParts, int iPart, nljson & tPart )
{
if ( iPart>=dParts.GetLength() )
return false;
const CSphString & sPart = dParts[iPart];
if ( sPart!="*" )
{
nljson::const_iterator tIt = tNode.find ( sPart.cstr() );
if ( tIt==tNode.end() )
return false;
if ( iPart+1<dParts.GetLength() && ( tIt->is_object() || tIt->is_array() ) )
{
nljson tVal ( tIt->is_array() ? nljson::array() : nljson::object() );
tPart[sPart.cstr()] = tVal;
return ProcessFilterPath ( *tIt, dParts, iPart+1, tPart[sPart.cstr()] );
} else
{
tPart[sPart.cstr()] = *tIt;
return true;
}
} else
{
bool bGotMatch = false;
for ( const auto & tItem : tNode.items() )
{
if ( iPart+1<dParts.GetLength() && ( tItem.value().is_object() || tItem.value().is_array() ) )
{
nljson tVal ( tItem.value().is_array() ? nljson::array() : nljson::object() );
if ( tPart.is_array() )
{
tPart.push_back ( tVal );
bGotMatch |= ProcessFilterPath ( tItem.value(), dParts, iPart+1, tPart.back() );
} else
{
tPart[tItem.key()] = ( tVal );
bGotMatch |= ProcessFilterPath ( tItem.value(), dParts, iPart+1, tPart[tItem.key()] );
}
} else if ( iPart+1==dParts.GetLength() ) // add only matched leaf items
{
if ( tPart.is_array() )
{
tPart.push_back ( tItem.value() );
} else
{
tPart[tItem.key()] = tItem.value();
}
bGotMatch = true;
}
}
return bGotMatch;
}
}
static int ProcessFilter ( const CSphString * sFilters, nljson & tRes )
{
if ( !sFilters )
return 0;
nljson tResFiltered = R"({})"_json;
StrVec_t dFilters = sphSplit ( sFilters->cstr(), "," );
for ( const CSphString & sFilter : dFilters )
{
nljson tPartJs = R"({})"_json;
StrVec_t dParts = sphSplit ( sFilter.cstr(), "." );
if ( ProcessFilterPath ( tRes, dParts, 0, tPartJs ) )
tResFiltered.merge_patch ( tPartJs );
}
tRes = tResFiltered;
return 1;
}
void HttpCompatHandler_c::ProcessKbnTableGet()
{
const CSphString & sSrcIndexName = GetUrlParts()[0];
nljson::json_pointer tTblName;
{
ScRL_t tLockTbl ( g_tLockKbnTable );
if ( g_tKbnTable.contains ( sSrcIndexName.cstr() ) )
{
tTblName /= sSrcIndexName.cstr();
} else
{
ScRL_t tLock ( g_tLockAlias );
const CSphString * pAliasIndex = g_hAlias ( sSrcIndexName );
if ( pAliasIndex )
tTblName /= pAliasIndex->cstr();
}
if ( tTblName.empty() || !g_tKbnTable.contains ( tTblName ) )
{
ReportMissedIndex ( sSrcIndexName );
return;
}
}
nljson tRes;
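// service entries (table names starting with '_') are stored at the top level and returned as-is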
if ( tTblName.to_string().rfind ( "/_" )==0 )
{
ScRL_t tLockTbl ( g_tLockKbnTable );
tRes = g_tKbnTable[tTblName];
} else
{
{
ScRL_t tLockTbl ( g_tLockKbnTable );
tRes[tTblName] = g_tKbnTable[tTblName];
}
// get inner object
if ( GetUrlParts().GetLength()>1 )
{
nljson::json_pointer tInner = tTblName;
for ( int i=1; i<GetUrlParts().GetLength(); i++ )
tInner /= GetUrlParts()[i].cstr();
if ( tRes.contains ( tInner ) )
{
nljson tTbl = tRes[tInner];
tRes = tTbl;
}
}
}
ProcessFilter ( GetOptions() ( "filter_path" ), tRes );
JsonEscapedBuilder tReply;
tReply += tRes.dump().c_str();
BuildReplyHead ( Str_t ( tReply ), EHTTP_STATUS::_200 );
}
static int ProcessFilterSource ( const CSphString * sSourceFilter, nljson & tRes )
{
if ( !sSourceFilter )
return 0;
// create list of json_pointer to columns at _source field
StrVec_t dFilters = sphSplit ( sSourceFilter->cstr(), "," );
CSphFixedVector<nljson::json_pointer> dColumns ( dFilters.GetLength() );
ARRAY_FOREACH ( i, dColumns )
{
std::string sVal = dFilters[i].cstr();
ReplaceSubstring ( sVal, ".", "/" );
sVal = "/" + sVal;
dColumns[i] = nljson::json_pointer ( sVal );
}
// filter _source field
nljson::json_pointer tHits ( "/hits/hits" );
nljson::json_pointer tSrcCol ( "/_source" );
for ( auto & tItem : tRes[tHits].items() )
{
nljson & tHit = tItem.value();
if ( !tHit.is_object() || !tHit.contains ( tSrcCol ) )
continue;
const nljson & tSrc = tHit[tSrcCol];
nljson tSrcFiltered = R"({})"_json;
for ( const auto & tCol : dColumns )
{
if ( tSrc.contains ( tCol ) )
tSrcFiltered[tCol] = tSrc[tCol];
}
tHit[tSrcCol] = tSrcFiltered;
}
return dColumns.GetLength();
}
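// post-processes a search reply for kibana: merges the stored '_raw' source back
// into every hit and applies the filter_path and _source uri filters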
static void ProcessKbnResult ( const CSphString * sSourceFilter, const CSphString * sFilterPath, CSphString & sRes )
{
nljson tRes = nljson::parse ( sRes.cstr(), nullptr, false );
if ( tRes.is_discarded() )
return;
nljson::json_pointer tHits ( "/hits/hits" );
if ( !tRes.contains ( tHits ) )
return;
int iFixed = 0;
nljson::json_pointer tRawCol ( "/_source/_raw" );
for ( auto & tItem : tRes[tHits].items() )
{
nljson & tHit = tItem.value();
if ( !tHit.is_object() || !tHit.contains ( tRawCol ) )
continue;
const auto & tRaw = tHit[tRawCol];
const auto tRawStr = tRaw.get<std::string>();
if ( tRawStr.empty() )
continue;
nljson tRawObj = nljson::parse ( tRawStr, nullptr, false );
if ( tRawObj.is_discarded() )
return;
nljson & tSrc = tHit["_source"];
tSrc.merge_patch ( tRawObj );
tSrc.erase ( "_raw" );
iFixed++;
}
iFixed += ProcessFilter ( sFilterPath, tRes );
iFixed += ProcessFilterSource ( sSourceFilter, tRes );
if ( iFixed )
sRes = tRes.dump().c_str();
}
// FIXME!!! replace with requests to every index with count(*) \ implicit grouper instead of this hack
// or implement group by IndexTag
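// recognizes the kibana "docs per index" request (terms aggregation over _index)
// and answers it straight from the local index stats without running a search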
static bool EmulateIndexCount ( const CSphString & sIndex, const nljson & tReq, CSphVector<BYTE> & dResult )
{
bool bCountFromIdx = ( tReq.size()>=1 && ( tReq.contains ( "aggs" ) || ( tReq.size()==2 && tReq.contains ( "size" ) ) ) );
if ( !bCountFromIdx )
return false;
nljson::json_pointer tField ( "/aggs/indices/terms/field" );
bool bFieldIndex = ( tReq.contains ( tField ) && tReq[tField]=="_index" );
if ( !bFieldIndex )
return false;
CSphString sFilter = sIndex;
if ( sFilter=="_all" )
sFilter = "*";
nljson tRes = R"({
"took": 10,
"timed_out": false,
"_shards": {
"total": 0,
"successful": 0,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 10,
"relation": "eq"
},
"max_score": null,
"hits": []
},
"aggregations": {
"indices": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": []
}
}
})"_json;
nljson::json_pointer tBuckets ( "/aggregations/indices/buckets" );
int iIndexes = 0;
int iDocsTotal = 0;
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocal )
{
if ( !tIt.second )
continue;
if ( !sFilter.IsEmpty() && !sphWildcardMatch ( tIt.first.cstr(), sFilter.cstr() ) )
continue;
RIdx_c tIdx ( tIt.second );
int iDocsCount = tIdx->GetStats().m_iTotalDocuments;
if ( !iDocsCount )
continue;
iIndexes++;
iDocsTotal += iDocsCount;
nljson tItem = R"({})"_json;
tItem["key"] = tIt.first.cstr();
tItem["doc_count"] = iDocsCount;
tRes[tBuckets].push_back ( tItem );
}
tRes["_shards"]["total"] = iIndexes;
tRes["_shards"]["successful"] = iIndexes;
tRes["hits"]["total"]["value"] = iDocsTotal;
std::string sRes = tRes.dump();
HttpBuildReply ( dResult, EHTTP_STATUS::_200, sRes.c_str(), sRes.length(), false );
return true;
}
bool HttpCompatHandler_c::ProcessSearch()
{
if ( IsEmpty ( GetBody() ) )
{
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
const CSphString & sIndex = GetUrlParts()[0];
nljson tReq = nljson::parse ( GetBody().first, nullptr, false );
if ( tReq.is_discarded())
{
ReportError ( "invalid body", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
if ( EmulateIndexCount ( sIndex, tReq, GetResult() ) )
return true;
CSphString sRes;
if ( !DoSearch ( sIndex, tReq, GetFullURL(), sRes ) )
{
m_sError = TlsMsg::MoveToString();
ReportError ( nullptr, HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
// filter_path and _source uri params
ProcessKbnResult ( GetOptions()( "_source" ), GetOptions()( "filter_path" ), sRes );
BuildReply ( FromStr ( sRes ), EHTTP_STATUS::_200 );
return true;
}
void HttpCompatHandler_c::ProcessCount()
{
if ( IsEmpty ( GetBody() ) )
{
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return;
}
const CSphString & sIndex = GetUrlParts()[0];
nljson tReq = nljson::parse ( GetBody().first, nullptr, false );
if ( tReq.is_discarded())
{
ReportError ( "invalid body", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return;
}
CSphString sRes;
if ( !DoSearch ( sIndex, tReq, GetFullURL(), sRes ) )
{
BuildReply ( FromStr ( sRes ), EHTTP_STATUS::_400 );
return;
}
nljson tRef = nljson::parse ( sRes.cstr(), nullptr, false );
if ( !tRef.contains ( "error" ) )
{
// _count transform
// FIXME !!! work only up to limit now
nljson tRes;
tRes["count"] = tRef["hits"]["total"];
tRes["_shards"] = tRef["_shards"];
sRes = tRes.dump().c_str();
}
// filter_path and _source uri params
ProcessKbnResult ( GetOptions()( "_source" ), GetOptions()( "filter_path" ), sRes );
BuildReply ( FromStr ( sRes ), EHTTP_STATUS::_200 );
}
static void CatAliases ( bool bJson, StringBuilder_c & sRes )
{
nljson tJsonRes = R"([])"_json;
{
ScRL_t tLock ( g_tLockAlias );
for ( const auto & tAliasIt : g_hAlias )
{
const CSphString & sIndex = tAliasIt.second;
const CSphString & sAlias = tAliasIt.first;
if ( bJson )
{
nljson tItem =
R"({
"filter": "-",
"routing.index": "-",
"routing.search": "-",
"is_write_index": "-"
})"_json;
tItem["alias"] = sAlias.cstr();
tItem["index"] = sIndex.cstr();
tJsonRes.push_back ( tItem );
} else
{
sRes.Appendf ( "%s %s - - - -\n", sAlias.cstr(), sIndex.cstr() );
}
}
}
if ( bJson )
sRes += tJsonRes.dump().c_str();
}
static void CatMaster ( bool bJson, StringBuilder_c & sRes )
{
ScRL_t tLockTbl ( g_tLockKbnTable );
const nljson & tMasterTbl = g_tKbnTable["master"];
if ( bJson )
{
nljson tJsonRes = R"([])"_json;
tJsonRes.push_back ( tMasterTbl );
sRes += tJsonRes.dump().c_str();
} else
{
sRes.Appendf ( "%s %s %s %s", tMasterTbl["id"].get<std::string>().c_str(), tMasterTbl["host"].get<std::string>().c_str(), tMasterTbl["ip"].get<std::string>().c_str(), tMasterTbl["node"].get<std::string>().c_str() );
}
}
static void CatTemplates ( bool bJson, const char * sFilter, StringBuilder_c & sRes )
{
nljson tJsonRes = R"([])"_json;
StringBuilder_c sTmp;
ScRL_t tLockTbl ( g_tLockKbnTable );
const nljson & tTemplates = g_tKbnTable["templates"];
for ( const auto & tIt : tTemplates.items() )
{
const std::string & sName = tIt.key();
if ( sFilter && !sphWildcardMatch ( sName.c_str(), sFilter ) )
continue;
const nljson & tPattern = tIt.value()["index_patterns"];
const nljson & tOrder = tIt.value()["order"];
const nljson & tVersion = tIt.value()["version"];
sTmp.Clear();
sTmp.StartBlock ( ", ", "[", "]" );
for ( const auto & tItPattern : tPattern.items() )
sTmp += tItPattern.value().get<std::string>().c_str();
sTmp.FinishBlock ( false );
if ( bJson )
{
nljson tItem;
tItem["name"] = sName;
tItem["index_patterns"] = sTmp.cstr();
tItem["order"] = tOrder;
tItem["version"] = tVersion;
tJsonRes.push_back ( tItem );
} else
{
sRes.Appendf ( "%s %s %s %s\n", sName.c_str(), sTmp.cstr(), tOrder.get<std::string>().c_str(), tVersion.get<std::string>().c_str() );
}
}
if ( bJson )
sRes += tJsonRes.dump().c_str();
}
struct CatIndexDesc_t
{
CSphString m_sName;
int64_t m_iDocs;
int64_t m_iSize;
};
// FIXME!!! add support of another 120 column names and shortcuts
enum class CatColumns_e
{
HEALTH = 0,
STATUS,
INDEX,
UUID,
PRI,
REP,
DOCS_COUNT,
DOCS_DEL,
STORE_SIZE,
PRI_SIZE,
TOTAL
};
static const char * g_sCatColumnNames[(int)CatColumns_e::TOTAL] = {
"health,h",
"status,s",
"index,i,idx",
"id,uuid",
"pri,p,shards.primary,shardsPrimary",
"rep,r,shards.replica,shardsReplica",
"docs.count,dc,docsCount",
"docs.deleted,dd,docsDeleted",
"store.size,ss,storeSize",
"pri.store.size"
};
static SmallStringHash_T<CatColumns_e> g_hCatColumnNames;
static void CatColumnsSetup()
{
StrVec_t dNames;
for ( int i=0; i<(int)CatColumns_e::TOTAL; i++ )
{
dNames = sphSplit ( g_sCatColumnNames[i], "," );
for ( const CSphString & sName : dNames )
g_hCatColumnNames.Add ( (CatColumns_e)i, sName );
}
}
static CSphVector<CSphNamedInt> GetCatIndexesColumns ( const CSphString * pColumns )
{
CSphVector<CSphNamedInt> dCol;
if ( !pColumns )
{
dCol.Resize ( (int)CatColumns_e::TOTAL );
for ( int i=0; i<(int)CatColumns_e::TOTAL; i++ )
{
dCol[i].second = i;
const char * sName = g_sCatColumnNames[i];
const char * sDel = strchr ( sName, ',' );
if ( sDel )
dCol[i].first.SetBinary ( sName, sDel-sName );
else
dCol[i].first = sName;
}
} else
{
StrVec_t dParsed = sphSplit ( pColumns->cstr(), "," );
ARRAY_FOREACH ( i, dParsed )
{
const CatColumns_e * pCol = g_hCatColumnNames ( dParsed[i] );
if ( pCol )
dCol.Add ( std::make_pair ( dParsed[i], (int)( *pCol ) ) );
}
}
return dCol;
}
static void CatPrintColumn ( bool bJson, const CSphNamedInt & tCol, const CatIndexDesc_t & tDesc, nljson & tJsonItem, StringBuilder_c & sRes )
{
switch ( (CatColumns_e)tCol.second )
{
case CatColumns_e::HEALTH:
if ( bJson )
tJsonItem[tCol.first.cstr()] = "green";
else
sRes << "green";
break;
case CatColumns_e::STATUS:
if ( bJson )
tJsonItem[tCol.first.cstr()] = "open";
else
sRes << "open";
break;
case CatColumns_e::INDEX:
if ( bJson )
tJsonItem[tCol.first.cstr()] = tDesc.m_sName.cstr();
else
sRes << tDesc.m_sName;
break;
case CatColumns_e::UUID:
{
CSphString sBuf;
sBuf.SetSprintf ( "%016" PRIx64, sphFNV64 ( tDesc.m_sName.cstr() ) );
if ( bJson )
tJsonItem[tCol.first.cstr()] = sBuf.cstr();
else
sRes << sBuf;
}
break;
case CatColumns_e::PRI:
if ( bJson )
tJsonItem[tCol.first.cstr()] = 1;
else
sRes << 1;
break;
case CatColumns_e::REP:
if ( bJson )
tJsonItem[tCol.first.cstr()] = 0;
else
sRes << 0;
break;
case CatColumns_e::DOCS_COUNT:
if ( bJson )
tJsonItem[tCol.first.cstr()] = tDesc.m_iDocs;
else
sRes << tDesc.m_iDocs;
break;
case CatColumns_e::DOCS_DEL:
if ( bJson )
tJsonItem[tCol.first.cstr()] = 0;
else
sRes << 0;
break;
case CatColumns_e::STORE_SIZE:
if ( bJson )
tJsonItem[tCol.first.cstr()] = tDesc.m_iSize;
else
sRes << tDesc.m_iSize;
break;
case CatColumns_e::PRI_SIZE:
if ( bJson )
tJsonItem[tCol.first.cstr()] = tDesc.m_iSize;
else
sRes << tDesc.m_iSize;
break;
default: break;
}
}
// FIXME!!! add support of 'v' option - column names
static void CatIndexes ( bool bJson, const char * sFilter, const CSphString * pColumns, StringBuilder_c & sRes )
{
nljson tJsonRes = R"([])"_json;
CSphVector<CatIndexDesc_t> dDesc;
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocal )
{
if ( !tIt.second )
continue;
if ( sFilter && !sphWildcardMatch ( tIt.first.cstr(), sFilter ) )
continue;
RIdx_c tIdx ( tIt.second );
CatIndexDesc_t & tDesc = dDesc.Add();
tDesc.m_sName = tIt.first;
const CSphSourceStats & tStat = tIdx->GetStats();
tDesc.m_iDocs = tStat.m_iTotalDocuments;
CSphIndexStatus tStatus;
tIdx->GetStatus ( &tStatus );
tDesc.m_iSize = tStatus.m_iDiskUse;
}
auto dCol = GetCatIndexesColumns ( pColumns );
nljson tJsonItem = R"({})"_json;
for ( const CatIndexDesc_t & tDesc : dDesc )
{
if ( bJson )
tJsonItem = R"({})"_json;
else
sRes.StartBlock ( " " );
for ( const auto & tCol : dCol )
CatPrintColumn ( bJson, tCol, tDesc, tJsonItem, sRes );
if ( bJson )
tJsonRes.push_back ( tJsonItem );
else
{
sRes.FinishBlock ( false );
sRes += "\n";
}
}
if ( bJson )
sRes += tJsonRes.dump().c_str();
}
void HttpCompatHandler_c::ProcessCat()
{
assert ( GetUrlParts().GetLength()>=2 );
bool bJson = IsEq ( GetOptions(), "format", "json" );
StringBuilder_c sRes;
if ( GetUrlParts()[1]=="aliases" )
{
CatAliases ( bJson, sRes );
} else if ( GetUrlParts()[1]=="master" )
{
CatMaster ( bJson, sRes );
} else if ( GetUrlParts()[1]=="templates" )
{
CatTemplates ( bJson, ( GetUrlParts().GetLength()>=3 ? GetUrlParts()[2].cstr() : nullptr ), sRes );
} else if ( GetUrlParts()[1]=="indices" )
{
CatIndexes ( bJson, ( GetUrlParts().GetLength()>=3 ? GetUrlParts()[2].cstr() : nullptr ), GetOptions() ( "h" ), sRes );
} else
{
sRes.Sprintf ( "Incorrect HTTP method for uri [%s] and method [GET], allowed: [POST]", GetFullURL().cstr() );
HttpErrorReply ( GetResult(), EHTTP_STATUS::_405, sRes.cstr() );
return;
}
BuildReplyHead ( Str_t ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessEmptyHead ()
{
nljson tJsonRes = R"(
{
"name" : "4e9d933ebde2",
"cluster_name" : "docker-cluster",
"cluster_uuid" : "Z7igA6xDRDKCVwnMuyXCOQ",
"version" : {
"number" : "7.4.1",
"build_flavor" : "default",
"build_type" : "docker",
"build_hash" : "fc0eeb6e2c25915d63d871d344e3d0b45ea0ea1e",
"build_date" : "2019-10-22T17:16:35.176724Z",
"build_snapshot" : false,
"lucene_version" : "8.2.0",
"minimum_wire_compatibility_version" : "6.8.0",
"minimum_index_compatibility_version" : "6.0.0-beta1"
},
"tagline" : "You Know, for Search"
}
)"_json;
std::string sRes = tJsonRes.dump();
BuildReplyHead ( FromStd ( sRes ), EHTTP_STATUS::_200 );
}
static bool GetIndexComplexFields ( const CSphString & sIndex, ComplexFields_t & dFields )
{
auto tIndex ( GetServed ( sIndex ) );
if ( !tIndex )
{
CompatWarning ( "unknown kibana table %s", sIndex.cstr() );
return false;
}
const CSphSchema & tSchema = RIdx_c( tIndex )->GetMatchSchema();
for ( const CSphColumnInfo & tField : tSchema.GetFields() )
AddComplexField ( tField.m_sName.cstr(), dFields );
return true;
}
struct CompatInsert_t
{
Str_t m_sBody;
const CSphString & m_sIndex;
const bool m_bReplace;
const char * m_sId { nullptr };
CompatInsert_t ( Str_t sBody, const CSphString & sIndex, bool bReplace )
: m_sBody ( sBody ), m_sIndex ( sIndex ), m_bReplace ( bReplace )
{}
};
void HttpCompatHandler_c::ProcessInsertIntoIdx ( const CompatInsert_t & tIns )
{
SqlStmt_t tStmt;
tStmt.m_eStmt = ( tIns.m_bReplace ? STMT_REPLACE : STMT_INSERT );
tStmt.m_sIndex = tIns.m_sIndex;
tStmt.m_tQuery.m_sIndexes = tIns.m_sIndex;
tStmt.m_dInsertSchema.Add ( sphGetDocidName() );
SqlInsert_t & tId = tStmt.m_dInsertValues.Add();
tId.m_iType = SqlInsert_t::CONST_INT;
if ( tIns.m_sId )
tId.SetValueInt ( strtoull ( tIns.m_sId, NULL, 10 ), false );
JsonObj_c tSource ( tIns.m_sBody.first );
bool bInserted = ( ParseJsonInsertSource ( tSource, tStmt, tIns.m_bReplace, m_sError ) && InsertDoc ( tStmt, m_sError ) );
if ( !bInserted )
{
ReportError ( nullptr, HttpErrorType_e::ContentParse, EHTTP_STATUS::_400, tIns.m_sIndex.cstr() );
} else
{
DocID_t tLastDoc = 0;
if ( session::LastIds().GetLength() )
tLastDoc = session::LastIds().Last();
nljson tRes;
tRes["_index"] = tIns.m_sIndex.cstr();
tRes["_type"] = "_doc";
tRes["_id"] = tLastDoc;
tRes["_version"] = 1;
tRes["_seq_no"] = 0;
tRes["_primary_term"] = 1;
tRes["result"] = ( ( tIns.m_bReplace && tIns.m_sId ) ? "updated" : "created" );
tRes["_shards"] = R"( { "total": 1, "successful": 1, "failed": 0 } )"_json;
std::string sRes = tRes.dump();
BuildReply ( FromStd ( sRes ), EHTTP_STATUS::_200 );
}
}
bool HttpCompatHandler_c::ProcessInsert()
{
if ( IsEmpty ( GetBody() ) )
{
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return true;
}
bool bDocReq = ( GetUrlParts()[1]=="_doc" );
CSphString sIndex;
StrVec_t dIndexes = ExpandIndexes ( GetUrlParts()[0], sIndex );
if ( sIndex.IsEmpty() )
{
ReportMissedIndex ( GetUrlParts()[0] );
return true;
}
CompatInsert_t tIns ( GetBody(), sIndex, bDocReq );
if ( GetUrlParts().GetLength()>2 )
tIns.m_sId = GetUrlParts()[2].cstr();
// index/_doc without id allowed only for POST
if ( bDocReq && tIns.m_sId==nullptr && GetRequestType()!=HTTP_POST )
{
ReportIncorrectMethod ( "POST" );
return true;
}
// index/_create without id not allowed
if ( !bDocReq && tIns.m_sId==nullptr )
{
if ( GetRequestType()==HTTP_POST )
{
m_sError.SetSprintf ( "Rejecting mapping update to [%s] as the final mapping would have more than 1 type: [_doc, _create]", sIndex.cstr() );
ReportError ( nullptr, HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400 );
} else
{
ReportIncorrectMethod ( "POST" );
}
return true;
}
if ( !IsKibanTable ( dIndexes ) )
{
ProcessInsertIntoIdx ( tIns );
return true;
}
nljson tSrc = nljson::parse ( GetBody().first );
int iVersion = 1;
// check \ get document version vs _create \ _doc
DocIdVer_t dIds;
if ( !GetDocIds ( sIndex.cstr(), tIns.m_sId, dIds, m_sError ) )
{
CompatWarning ( "doc '%s', error: %s", tIns.m_sId, m_sError.cstr() );
return false;
}
if ( dIds.GetLength() )
{
if ( dIds.GetLength()!=1 )
{
CompatWarning ( "multiple (%d) docs '%s' found", dIds.GetLength(), tIns.m_sId );
return false;
}
iVersion = dIds[0].second + 1;
if ( !tIns.m_bReplace )
{
m_sError.SetSprintf ( "[%s]: version conflict, document already exists (current version [%d])", tIns.m_sId, iVersion );
ReportError ( nullptr, HttpErrorType_e::VersionConflictEngine, EHTTP_STATUS::_409, sIndex.cstr() );
return true;
}
}
ComplexFields_t dFields;
if ( !GetIndexComplexFields ( sIndex, dFields ) )
return false;
bool bInserted = InsertDoc ( sIndex, dFields, tSrc, tIns.m_bReplace, tIns.m_sId, iVersion, m_sError );
if ( !bInserted )
{
CompatWarning ( "doc '%s', error: %s", tIns.m_sId, m_sError.cstr() );
m_sError.SetSprintf ( "[%s]: version conflict, document already exists (current version [%d])", tIns.m_sId, iVersion );
ReportError ( nullptr, HttpErrorType_e::VersionConflictEngine, EHTTP_STATUS::_409, sIndex.cstr() );
} else
{
nljson tRes;
tRes["_index"] = sIndex.cstr();
tRes["_type"] = "_doc";
tRes["_id"] = tIns.m_sId;
tRes["_version"] = iVersion;
tRes["_seq_no"] = 0;
tRes["_primary_term"] = 1;
tRes["result"] = ( ( GetUrlParts()[1]=="_create" || iVersion==1 ) ? "created" : "updated" );
tRes["_shards"] = R"( { "total": 1, "successful": 1, "failed": 0 } )"_json;
std::string sRes = tRes.dump();
BuildReply ( FromStd ( sRes ), EHTTP_STATUS::_200 );
}
return true;
}
bool HttpCompatHandler_c::ProcessDeleteDoc()
{
CSphString sIndex;
ExpandIndexes ( GetUrlParts()[0], sIndex );
const CSphString & sId = GetUrlParts()[2];
if ( sIndex.IsEmpty() )
{
ReportMissedIndex ( GetUrlParts()[0] );
return true;
}
// get document version vs _create
int iVersion = 1;
DocIdVer_t dIds;
if ( !GetDocIds ( sIndex.cstr(), sId.cstr(), dIds, m_sError ) )
{
CompatWarning ( "doc '%s', error: %s", sId.cstr(), m_sError.cstr() );
return false;
}
if ( dIds.GetLength() )
{
if ( dIds.GetLength()!=1 )
{
CompatWarning ( "multiple (%d) docs '%s' found", dIds.GetLength(), sId.cstr() );
return false;
}
iVersion = dIds[0].second;
SqlStmt_t tStmt;
tStmt.m_sIndex = sIndex;
tStmt.m_tQuery.m_sIndexes = sIndex;
CSphFilterSettings & tFilter = tStmt.m_tQuery.m_dFilters.Add();
tFilter.m_sAttrName = "id";
tFilter.m_eType = SPH_FILTER_VALUES;
tFilter.m_dValues.Add ( (int64_t)GetDocID ( sId.cstr() ) );
tFilter.m_bExclude = false;
std::unique_ptr<StmtErrorReporter_i> pReporter ( CreateHttpErrorReporter() );
sphHandleMysqlDelete ( *pReporter.get(), tStmt, Str_t() );
if ( pReporter->IsError() )
{
CompatWarning ( "doc '%s', error: %s", sId.cstr(), pReporter->GetError() );
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return true;
}
}
nljson tRes;
tRes["_index"] = sIndex.cstr();
tRes["_type"] = "_doc";
tRes["_id"] = sId.cstr();
tRes["_version"] = iVersion;
tRes["_seq_no"] = 0;
tRes["_primary_term"] = 1;
tRes["result"] = ( dIds.GetLength() ? "deleted" : "not_found" );
tRes["_shards"] = R"( { "total": 1, "successful": 1, "failed": 0 } )"_json;
std::string sRes = tRes.dump();
BuildReply ( FromStd ( sRes ), EHTTP_STATUS::_200 );
return true;
}
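// scripted updates are emulated: a known painless script is recognized by its
// whitespace-stripped source text and mapped to a native C++ handler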
typedef bool ( *fnScript ) ( const nljson & tParams, int & iVersion, nljson & tDoc, CSphString & sError );
static bool fnScriptUpdate1 ( const nljson & tParams, int & iVersion, nljson & tDoc, CSphString & sError )
{
nljson::json_pointer tParamType ( "/type" );
nljson::json_pointer tParamCntName ( "/counterFieldName" );
nljson::json_pointer tParamCnt ( "/count" );
nljson::json_pointer tParamTime ( "/time" );
if ( !tParams.contains ( tParamType ) || !tParams.contains ( tParamCntName ) || !tParams.contains ( tParamCnt ) || !tParams.contains ( tParamTime ) )
{
sError.SetSprintf ( "missed parameter: %s=%d, %s=%d, %s=%d, %s=%d",
tParamType.to_string().c_str(), (int)tParams.contains ( tParamType ),
tParamCntName.to_string().c_str(), (int)tParams.contains ( tParamCntName ),
tParamCnt.to_string().c_str(), (int)tParams.contains ( tParamCnt ),
tParamTime.to_string().c_str(), (int)tParams.contains ( tParamTime ) );
return false;
}
std::string sValType = tParams[tParamType];
std::string sValCntName = tParams[tParamCntName];
int iValCnt = tParams[tParamCnt];
if ( !tDoc.contains ( sValType ) ||! tDoc[sValType].contains ( sValCntName ) )
{
tDoc[sValType][sValCntName] = iValCnt;
} else
{
int iPrevCnt = tDoc[sValType][sValCntName];
tDoc[sValType][sValCntName] = iPrevCnt + iValCnt;
}
tDoc["updated_at"] = tParams[tParamTime];
return true;
}
static std::pair < const char *, fnScript > g_sScripts[] = {
{ R"(
if (ctx._source[params.type][params.counterFieldName] == null)
{
ctx._source[params.type][params.counterFieldName] = params.count;
} else {
ctx._source[params.type][params.counterFieldName] += params.count;
}
ctx._source.updated_at = params.time;
)", fnScriptUpdate1 },
{ nullptr, nullptr }
};
static SmallStringHash_T< fnScript > g_hScripts;
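// in-place removal of all whitespace, used to normalize script sources before hash lookup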
static void StripSpaces ( char * sBuf )
{
char * sDst = sBuf;
char * sSrc = sBuf;
while ( *sSrc )
{
if ( sphIsSpace ( *sSrc ) )
sSrc++;
else
*sDst++ = *sSrc++;
}
*sDst = '\0';
}
void CreateScripts()
{
int iScript = 0;
for ( const auto & tItem : g_sScripts )
{
if ( !tItem.first )
break;
CSphString sScript = tItem.first;
StripSpaces ( const_cast<char *>( sScript.cstr() ) );
if ( !g_hScripts.Add ( tItem.second, sScript ) )
CompatWarning ( "duplicate script %d found %s", iScript, tItem.first );
iScript++;
}
}
static void ReportUpated ( const char * sId, int iVersion, const char * sIndex, const char * sOperation, const nljson & tDoc, CSphVector<BYTE> & dResult )
{
nljson tRes;
tRes["_index"] = sIndex;
tRes["_type"] = "_doc";
tRes["_id"] = sId;
tRes["_version"] = iVersion;
tRes["result"] = sOperation;
tRes["_shards"] = R"( { "total": 1, "successful": 1, "failed": 0 } )"_json;
tRes["_seq_no"] = 0;
tRes["_primary_term"] = 1;
nljson tGet;
tGet["_seq_no"] = 0;
tGet["_primary_term"] = 1;
tGet["found"] = true;
tGet["_source"] = tDoc;
tRes["get"] = tGet;
std::string sRes = tRes.dump();
HttpBuildReply ( dResult, EHTTP_STATUS::_200, sRes.c_str(), sRes.length(), false );
}
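// emulates the Elasticsearch _update endpoint: runs a known script against the stored
// document source or merges the partial "doc" object into it, handles "upsert" when the
// document does not exist yet, then bumps the version and reinserts the updated source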
bool HttpCompatHandler_c::ProcessUpdateDoc()
{
CSphString sIndex;
ExpandIndexes ( GetUrlParts()[0], sIndex );
const CSphString & sId = GetUrlParts()[2];
if ( sIndex.IsEmpty() )
{
ReportMissedIndex ( GetUrlParts()[0] );
return false;
}
if ( IsEmpty ( GetBody() ) )
{
ReportError ( "Validation Failed: 1: script or doc is missing", HttpErrorType_e::ContentParse, EHTTP_STATUS::_400 );
return false;
}
nljson tUpd = nljson::parse ( GetBody().first, nullptr, false );
if ( tUpd.is_discarded() )
{
ReportError ( "invalid body", HttpErrorType_e::ContentParse, EHTTP_STATUS::_400 );
return false;
}
bool bHasScript = tUpd.contains ( "script" );
nljson::json_pointer tScriptName ( "/script/source" );
nljson::json_pointer tScriptParamsName ( "/script/params" );
if ( bHasScript && ( !tUpd.contains ( tScriptName ) || !tUpd.contains ( tScriptParamsName ) ) )
{
ReportMissedScript ( sIndex );
return false;
}
fnScript * pUpdateScript = nullptr;
if ( bHasScript )
{
// compact the script by removing whitespace, then look up the matching native script function
CSphString sScript = tUpd[tScriptName].get<std::string>().c_str();
StripSpaces ( const_cast<char *>( sScript.cstr() ) );
pUpdateScript = g_hScripts ( sScript );
if ( !pUpdateScript )
{
ReportMissedScript ( sIndex );
return false;
}
}
DocIdVer_t dIds;
if ( !GetDocIds ( sIndex.cstr(), sId.cstr(), dIds, m_sError ) )
{
CompatWarning ( "%s", m_sError.cstr() );
return false;
}
// validate document source for insert new document
nljson::json_pointer tSrcName ( "/upsert" );
if ( !dIds.GetLength() && !tUpd.contains ( tSrcName ) )
{
CompatWarning ( "doc '%s' source '%s' missed", sId.cstr(), tSrcName.to_string().c_str() );
m_sError.SetSprintf ( "[_doc][%s]: document missing", sId.cstr() );
ReportError ( nullptr, HttpErrorType_e::DocumentMissing, EHTTP_STATUS::_404 );
return true;
}
int iVersion = 1;
ComplexFields_t dComplexFields;
if ( !GetIndexComplexFields ( sIndex, dComplexFields ) )
return false;
// create new document
if ( !dIds.GetLength() )
{
const nljson & tSrc = tUpd[tSrcName];
if ( !InsertDoc ( sIndex, dComplexFields, tSrc, false, sId.cstr(), iVersion, m_sError ) )
{
CompatWarning ( "doc '%s', error: %s", sId.cstr(), m_sError.cstr() );
m_sError.SetSprintf ( "[%s]: version conflict, document already exists (current version [%d])", sId.cstr(), iVersion );
ReportError ( nullptr, HttpErrorType_e::VersionConflictEngine, EHTTP_STATUS::_409, sIndex.cstr() );
return true;
} else
{
ReportUpdated ( sId.cstr(), iVersion, sIndex.cstr(), "created", tSrc, GetResult() );
return true;
}
}
if ( dIds.GetLength()!=1 )
{
CompatWarning ( "multiple %d documents found for '%s'", dIds.GetLength(), sId.cstr() );
ReportError ( "failed to execute script", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400 );
return false;
}
if ( dIds[0].first!=sId )
{
CompatWarning ( "wrong document found '%s' for '%s'", dIds[0].first.cstr(), sId.cstr() );
ReportError ( "failed to execute script", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400 );
return false;
}
auto tServed ( GetServed ( sIndex ) );
if ( !tServed )
{
ReportMissedIndex ( sIndex );
return false;
}
RIdx_c pIndex ( tServed );
DocstoreSession_c tSession;
pIndex->CreateReader ( tSession.GetUID() );
CSphVector<BYTE> dRawDoc;
iVersion = dIds[0].second + 1;
if ( !GetIndexDoc ( pIndex, sId.cstr(), tSession.GetUID(), dRawDoc, m_sError ) )
{
CompatWarning ( "%s", m_sError.cstr() );
return false;
}
nljson tSrc = nljson::parse ( dRawDoc.Begin() );
// update raw source document
if ( bHasScript )
{
assert ( pUpdateScript );
if ( !((*pUpdateScript)( tUpd[tScriptParamsName], iVersion, tSrc, m_sError ) ) )
{
CompatWarning ( "%s", m_sError.cstr() );
ReportError ( "failed to execute script", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400 );
return true;
}
} else if ( tUpd.contains ( "doc" ) )
{
const nljson & tDocUpd = tUpd["doc"];
tSrc.update ( tDocUpd );
} else
{
CompatWarning ( "doc '%s' source 'doc' missed", sId.cstr() );
m_sError.SetSprintf ( "[_doc][%s]: document missing", sId.cstr() );
ReportError ( nullptr, HttpErrorType_e::DocumentMissing, EHTTP_STATUS::_404 );
return true;
}
// reinsert updated document
if ( !InsertDoc ( sIndex, dComplexFields, tSrc, true, sId.cstr(), iVersion, m_sError ) )
{
CompatWarning ( "doc '%s', error: %s", sId.cstr(), m_sError.cstr() );
m_sError.SetSprintf ( "[%s]: version conflict, document already exists (current version [%d])", sId.cstr(), iVersion );
ReportError ( nullptr, HttpErrorType_e::VersionConflictEngine, EHTTP_STATUS::_409, sIndex.cstr() );
return true;
}
ReportUpdated ( sId.cstr(), iVersion, sIndex.cstr(), "updated", tSrc, GetResult() );
return true;
}
static void DropTable ( const CSphString & sName )
{
CSphString sError;
if ( !DropIndexInt ( sName, true, sError ) )
CompatWarning ( "%s", sError.cstr() );
{
CSphVector<CSphString> dIndexes;
ScWL_t tLock ( g_tLockAlias );
for ( auto tIt : g_hAlias )
{
const CSphString & sAliasIndex = tIt.second;
if ( sName==sAliasIndex )
dIndexes.Add ( tIt.first );
}
for ( const auto & sIndex : dIndexes )
g_hAlias.Delete ( sIndex );
}
{
ScWL_t tLockTbl ( g_tLockKbnTable );
g_tKbnTable.erase ( sName.cstr() );
}
}
bool HttpCompatHandler_c::ProcessCreateTable()
{
assert ( GetUrlParts().GetLength() );
const CSphString & sName = GetUrlParts()[0];
bool bDropExistTable = false;
{
auto tIndex ( GetServed ( sName ) );
if ( tIndex )
{
ScRL_t tLockTbl ( g_tLockKbnTable );
bDropExistTable = !g_tKbnTable.contains ( sName.cstr() );
if ( !bDropExistTable )
{
m_sError.SetSprintf ( "index [%s] already exists", sName.cstr() );
ReportError ( nullptr, HttpErrorType_e::ResourceAlreadyExists, EHTTP_STATUS::_400, sName.cstr() );
return true;
}
}
}
if ( IsEmpty ( GetBody() ) )
{
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
nljson tTbl = nljson::parse ( GetBody().first, nullptr, false );
if ( tTbl.is_discarded() )
{
ReportError ( "request body or source parameter is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
// direct create-index path (without a template)
if ( !tTbl.contains( "mappings" ) )
{
ReportError ( "request body mappings is required", HttpErrorType_e::Parse, EHTTP_STATUS::_400 );
return false;
}
// need to delete a table that was loaded by Manticore but has no metadata in the JSON file
DropTable ( sName );
ComplexFields_t dFields;
CreateTableSettings_t tOpts;
CreateKbnTable ( tOpts, tTbl, dFields );
StrVec_t dWarnings;
if ( !CreateNewIndexConfigless ( sName, tOpts, dWarnings, m_sError ) )
CompatWarning ( "%s", m_sError.cstr() );
for ( const CSphString & sWarn : dWarnings )
CompatWarning ( "%s", sWarn.cstr() );
{
ScWL_t tLockTbl ( g_tLockKbnTable );
g_tKbnTable[sName.cstr()] = tTbl;
}
COMPATINFO << "created table '" << sName.cstr() << "'";
CreateAliases(); // FIXME!!! create only this table alias
nljson tRes = R"(
{
"acknowledged": true,
"shards_acknowledged": true
})"_json;
tRes["index"] = sName.cstr();
std::string sRes = tRes.dump();
BuildReply ( FromStd ( sRes ), EHTTP_STATUS::_200 );
return true;
}
void HttpCompatHandler_c::ProcessDeleteTable()
{
const CSphString & sName = GetUrlParts()[0];
{
auto tIndex ( GetServed ( sName ) );
if ( !tIndex )
{
m_sError.SetSprintf ( "no such index [%s]", sName.cstr() );
ReportError ( nullptr, HttpErrorType_e::IndexNotFound, EHTTP_STATUS::_404, sName.cstr() );
return;
}
}
DropTable ( sName );
const char * sRes = "{ \"acknowledged\": true }";
BuildReply ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessAliasGet()
{
CSphString sIndex;
StrVec_t dFilters;
if ( GetUrlParts().GetLength()>=2 && GetUrlParts()[1]=="_alias" )
{
sIndex = GetUrlParts()[0];
if ( GetUrlParts().GetLength()>=3 )
sphSplit ( dFilters, GetUrlParts()[2].cstr(), "," );
} else if ( GetUrlParts().GetLength()>=2 && GetUrlParts()[0]=="_alias" )
{
sphSplit ( dFilters, GetUrlParts()[1].cstr(), "," );
}
dFilters.Apply ( [] ( CSphString & sItem ) {
if ( sItem=="_all" )
sItem = "*";
});
nljson tRes = R"({})"_json;
{
ScRL_t tLock ( g_tLockAlias );
for ( const auto & tIt : g_hAlias )
{
const CSphString & sAliasName = tIt.first;
const CSphString & sAliasIndex = tIt.second;
if ( !sIndex.IsEmpty() && !sphWildcardMatch ( sAliasIndex.cstr(), sIndex.cstr() ) )
continue;
if ( dFilters.GetLength() && !dFilters.any_of ( [&] ( const CSphString & sFilter ) { return sphWildcardMatch ( sAliasName.cstr(), sFilter.cstr() ); } ) )
continue;
if ( !tRes.contains ( sAliasIndex.cstr() ) )
tRes[sAliasIndex.cstr()] = R"({ "aliases": {} })"_json;
tRes[sAliasIndex.cstr()]["aliases"][sAliasName.cstr()] = R"({})"_json;
}
}
std::string sRes = tRes.dump();
BuildReplyHead ( FromStd ( sRes ), EHTTP_STATUS::_200 );
}
// FIXME!!! add support for these forms too
// PUT /<index>/_alias/<alias>
// POST /<index>/_alias/<alias>
// PUT /<index>/_aliases/<alias>
// POST /<index>/_aliases/<alias>
void HttpCompatHandler_c::ProcessAliasSet()
{
nljson tAliases = nljson::parse ( GetBody().first );
if ( tAliases.contains( "actions" ) )
{
// FIXME!!! add support for the aliases and indices options
nljson::json_pointer tIndexName ( "/index" );
nljson::json_pointer tAliasName ( "/alias" );
for ( const auto & tIt : tAliases["actions"].items() )
{
const auto & tItem = tIt.value().cbegin();
if ( !tItem.value().contains ( tIndexName ) || !tItem.value().contains ( tAliasName ) )
{
ReportError ( "[aliases] failed to parse field [actions]", HttpErrorType_e::ContentParse, EHTTP_STATUS::_400 );
return;
}
CSphString sIndex = tItem.value()[tIndexName].get<std::string>().c_str();
CSphString sAlias = tItem.value()[tAliasName].get<std::string>().c_str();
{
auto tIndex ( GetServed ( sIndex ) );
if ( !tIndex )
{
ReportMissedIndex ( sIndex );
return;
}
}
if ( tItem.key()=="add" )
{
{
ScWL_t tLock ( g_tLockAlias );
g_hAlias.Add ( sIndex, sAlias );
}
{
ScWL_t tLock ( g_tLockKbnTable );
if ( g_tKbnTable.contains ( sIndex.cstr() ) )
{
nljson & tIdx = g_tKbnTable[sIndex.cstr()];
if ( !tIdx.contains ( "aliases" ) )
tIdx["aliases"] = R"({})"_json;
tIdx["aliases"][sAlias.cstr()] = R"({})"_json;
}
}
} else if ( tItem.key()=="remove" )
{
{
ScWL_t tLock ( g_tLockAlias );
if ( !g_hAlias.Delete ( sAlias ) )
{
CSphString sError;
sError.SetSprintf ( "aliases [%s] missing", sAlias.cstr() );
ReportError ( sError.cstr(), HttpErrorType_e::AliasesNotFound, EHTTP_STATUS::_404 );
return;
}
}
{
ScWL_t tLock ( g_tLockKbnTable );
if ( g_tKbnTable.contains ( sIndex.cstr() ) )
{
nljson & tIdx = g_tKbnTable[sIndex.cstr()];
if ( tIdx.contains ( "aliases" ) )
tIdx["aliases"].erase ( sAlias.cstr() );
}
}
} else if ( tItem.key()=="remove_index" )
{
DropTable ( sIndex );
} else
{
ReportError ( "[aliases] failed to parse field [actions]", HttpErrorType_e::ContentParse, EHTTP_STATUS::_400 );
return;
}
}
}
const char * sRes = "{ \"acknowledged\": true }";
BuildReply ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessRefresh ( const CSphString * pName )
{
if ( pName )
{
auto tIndex ( GetServed ( *pName ) );
if ( !tIndex )
{
ReportMissedIndex ( *pName );
return;
}
}
const char * sRes = "{ \"_shards\": { \"total\": 1, \"successful\": 1, \"failed\": 0 } }";
BuildReply ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessCCR()
{
const char * sRes = "{ \"follower_indices\": [] }";
BuildReplyHead ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ProcessILM()
{
nljson tItems = R"({})"_json;
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocal )
{
if ( !tIt.second )
continue;
nljson tItem = R"({"managed": false})"_json;
tItem["index"] = tIt.first.cstr();
tItems[tIt.first.cstr()] = tItem;
}
nljson tRes = R"({})"_json;
tRes["indices"] = tItems;
std::string sRes = tRes.dump();
BuildReplyHead ( FromStd ( sRes ), EHTTP_STATUS::_200 );
}
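// maps a Manticore column to an Elastic field-caps descriptor: full-text fields become
// "text"; attributes map as timestamp->date, int/bool->integer, float->float,
// bigint->long, string->keyword, json->object; anything else stays unsearchable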
static nljson GetFieldDesc ( const CSphColumnInfo & tCol, bool bField )
{
nljson tField = R"({"searchable":false, "aggregatable": false})"_json;
if ( bField )
{
tField["type"] = "text";
tField["searchable"] = true;
} else if ( tCol.m_eAttrType!=SPH_ATTR_NONE )
{
switch ( tCol.m_eAttrType )
{
case SPH_ATTR_TIMESTAMP:
tField["type"] = "date";
tField["aggregatable"] = true;
tField["searchable"] = true;
break;
case SPH_ATTR_INTEGER:
case SPH_ATTR_BOOL:
tField["type"] = "integer";
tField["aggregatable"] = true;
tField["searchable"] = true;
break;
case SPH_ATTR_FLOAT:
tField["type"] = "float";
tField["searchable"] = true;
break;
case SPH_ATTR_BIGINT:
tField["type"] = "long";
tField["aggregatable"] = true;
tField["searchable"] = true;
break;
case SPH_ATTR_STRING:
tField["type"] = "keyword";
tField["aggregatable"] = true;
tField["searchable"] = true;
break;
case SPH_ATTR_JSON:
tField["type"] = "object";
tField["searchable"] = true;
break;
default:
break;
}
}
return tField;
}
static bool CheckFieldDesc ( const nljson & tFields, const char * sColName, const nljson & tDesc, const CSphString & sIndex, StringBuilder_c & sError )
{
if ( !tDesc.contains ( "type" ) )
{
sError.Appendf ( "index '%s' has unmapped column '%s'", sIndex.cstr(), sColName );
return false;
}
if ( !tFields.contains ( sColName ) )
return true;
assert ( tDesc.contains ( "type" ) );
if ( tFields[sColName].contains ( tDesc["type"] ) )
return true;
const nljson & tField = tFields[sColName];
sError.Appendf ( "'%s' already has type '%s' but index '%s' type is '%s'", sColName, tField.cbegin().key().c_str(), sIndex.cstr(), tDesc["type"].get<std::string>().c_str() );
return false;
}
static void AddSpecialColumns ( nljson & tFields )
{
tFields["_index"] = R"( { "_index": { "type": "_index", "searchable": false, "aggregatable": false } } )"_json;
tFields["_feature"] = R"( { "_feature": { "type": "_feature", "searchable": false, "aggregatable": false } } )"_json;
tFields["_ignored"] = R"( { "_ignored": { "type": "_ignored", "searchable": false, "aggregatable": false } } )"_json;
tFields["_version"] = R"( { "_version": { "type": "_version", "searchable": false, "aggregatable": false } } )"_json;
tFields["_type"] = R"( { "_type": { "type": "_type", "searchable": false, "aggregatable": false } } )"_json;
tFields["_seq_no"] = R"( { "_seq_no": { "type": "_seq_no", "searchable": false, "aggregatable": false } } )"_json;
tFields["_field_names"] = R"( { "_field_names": { "type": "_field_names", "searchable": false, "aggregatable": false } } )"_json;
tFields["_source"] = R"( { "_source": { "type": "_source", "searchable": false, "aggregatable": false } } )"_json;
tFields["_id"] = R"( { "_id": { "type": "_id", "searchable": false, "aggregatable": false } } )"_json;
}
// FIXME!!! add support for Elastic/Kibana tables
void HttpCompatHandler_c::ProcessFields()
{
const CSphString & sIndex = GetUrlParts()[0];
nljson tRes = R"({"indices":[], "fields":{}})"_json;
StringBuilder_c sError ( "," );
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocal )
{
if ( !tIt.second )
continue;
if ( !sIndex.IsEmpty() && !sphWildcardMatch ( tIt.first.cstr(), sIndex.cstr() ) )
continue;
RIdx_c tIdx ( tIt.second );
tRes["indices"].push_back ( tIt.first.cstr() );
const CSphSchema & tSchema = tIdx->GetMatchSchema();
for ( const CSphColumnInfo & tCol : tSchema.GetFields() )
{
// field-string types will be processed at attributes
if ( tSchema.GetAttr ( tCol.m_sName.cstr() ) )
continue;
const char * sColName = tCol.m_sName.cstr();
nljson tField = GetFieldDesc ( tCol, true );
if ( !CheckFieldDesc ( tRes["fields"], sColName, tField, sIndex, sError ) )
continue;
tRes["fields"][sColName][tField["type"].get<std::string>()] = tField;
}
for ( int i=0; i<tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tCol = tSchema.GetAttr ( i );
// skip special Manticore-only attributes and token-count types
if ( tCol.m_sName=="@version" || tCol.m_sName==sphGetBlobLocatorName() || tCol.m_eAttrType==SPH_ATTR_TOKENCOUNT )
continue;
bool bField = ( tSchema.GetField ( tCol.m_sName.cstr() ) );
nljson tField = GetFieldDesc ( tCol, bField );
if ( !CheckFieldDesc ( tRes["fields"], tCol.m_sName.cstr(), tField, sIndex, sError ) )
continue;
tRes["fields"][tCol.m_sName.cstr()][tField["type"].get<std::string>()] = tField;
}
}
// add special Elastic-only attributes
AddSpecialColumns ( tRes["fields"] );
if ( !sError.IsEmpty() )
CompatWarning ( "%s", sError.cstr() );
std::string sRes = tRes.dump();
BuildReply ( FromStd ( sRes ), EHTTP_STATUS::_200 );
}
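// top-level router: dispatches the request by HTTP verb and URL parts, mirroring the
// subset of the Elasticsearch REST API that the Kibana/dashboards compatibility layer needs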
bool HttpCompatHandler_c::ProcessEndpoints()
{
if ( m_dUrlParts.GetLength() && m_dUrlParts.Last().Ends ( "_msearch" ) )
return ProcessMSearch();
if ( GetRequestType()==HTTP_GET || GetRequestType()==HTTP_HEAD )
{
if ( !m_dUrlParts.GetLength() )
{
ProcessEmptyHead();
return true;
}
if ( m_dUrlParts.GetLength()>1 && m_dUrlParts[1].Begins( "_" ) &&
m_dUrlParts[1]=="_doc" &&
( m_dUrlParts[0]==g_sKbnTableName || m_dUrlParts[0]==g_sKbnTableAlias ) &&
ProcessKbnTableDoc() )
return true;
if ( m_dUrlParts.GetLength()>=2 && m_dUrlParts[0]=="_cat" )
{
ProcessCat();
return true;
}
if ( ( m_dUrlParts.GetLength() && m_dUrlParts[0]=="_alias" ) || ( m_dUrlParts.GetLength()>=2 && m_dUrlParts[1]=="_alias" ) )
{
ProcessAliasGet();
return true;
}
if ( ( m_dUrlParts.GetLength() && m_dUrlParts[0]=="_rollup" ) || ( m_dUrlParts.GetLength()>=2 && m_dUrlParts[1]=="_rollup" )
|| ( m_dUrlParts.GetLength() && m_dUrlParts[0]=="_ingest" ) )
{
EmptyReply();
return true;
}
if ( m_dUrlParts.GetLength()>=2 && m_dUrlParts[1]=="_ilm" )
{
ProcessILM();
return true;
}
if ( m_dUrlParts.GetLength()>=2 && m_dUrlParts[1]=="_ccr" )
{
ProcessCCR();
return true;
}
if ( m_dUrlParts.GetLength() )
{
ProcessKbnTableGet();
return true;
}
}
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>1 && m_dUrlParts[1]=="_search" )
return ProcessSearch();
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>1 && m_dUrlParts[1]=="_count" )
{
ProcessCount();
return true;
}
if ( ( GetRequestType()==HTTP_POST || GetRequestType()==HTTP_PUT )
&& m_dUrlParts.GetLength()>1 && ( m_dUrlParts[1]=="_doc" || m_dUrlParts[1]=="_create" )
&& ProcessInsert() )
return true;
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>0 && m_dUrlParts[0]=="_mget" )
{
ProcessKbnTableMGet();
return true;
}
if ( GetRequestType()==HTTP_PUT )
{
if ( m_dUrlParts.GetLength()>1 && ( m_dUrlParts[0]=="_template" || m_dUrlParts[0]=="_monitoring" ) )
{
ProcessPutTemplate();
return true;
}
if ( m_dUrlParts.GetLength() && m_dUrlParts[0]=="_monitoring" )
{
ProcessIgnored();
return true;
}
if ( m_dUrlParts.GetLength() )
return ProcessCreateTable();
}
if ( GetRequestType()==HTTP_DELETE && m_dUrlParts.GetLength()>2 && m_dUrlParts[1]=="_doc"
&& ProcessDeleteDoc() )
return true;
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>2 && m_dUrlParts[1]=="_update" )
return ProcessUpdateDoc();
if ( GetRequestType()==HTTP_DELETE && m_dUrlParts.GetLength()==1 )
{
ProcessDeleteTable();
return true;
}
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>0 && m_dUrlParts[0]=="_aliases" )
{
ProcessAliasSet();
return true;
}
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>=2 && m_dUrlParts[1]=="_refresh" )
{
ProcessRefresh ( m_dUrlParts.Begin() );
return true;
}
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>=1 && m_dUrlParts[0]=="_refresh" )
{
ProcessRefresh ( nullptr );
return true;
}
if ( GetRequestType()==HTTP_POST && m_dUrlParts.GetLength()>=2 && m_dUrlParts[1]=="_field_caps" )
{
ProcessFields();
return true;
}
if ( ( GetRequestType()==HTTP_POST || GetRequestType()==HTTP_PUT ) && m_dUrlParts.GetLength() && m_dUrlParts[0]=="_monitoring" )
{
ProcessIgnored();
return true;
}
FormatError ( EHTTP_STATUS::_501, "%s - unsupported endpoint", GetFullURL().cstr() );
return false;
}
static void DropKbnTables()
{
CSphVector<CSphString> dIdx;
{
ScRL_t tLock ( g_tLockKbnTable );
for ( auto & tTbl : g_tKbnTable.items() )
dIdx.Add ( tTbl.key().c_str() );
}
{
ScWL_t tLock ( g_tLockKbnTable );
g_tKbnTable = "{}"_json;
}
{
ScWL_t tLock ( g_tLockAlias );
g_hAlias.Reset();
}
// look for local indexes with kibana names
ServedSnap_t hLocal = g_pLocalIndexes->GetHash();
for ( const auto & tIt : *hLocal )
{
if ( !tIt.second )
continue;
for ( const CSphString & sName : g_dKbnTablesNames )
{
if ( tIt.first.Begins ( sName.cstr() ) )
{
dIdx.Add ( tIt.first );
break;
}
}
}
dIdx.Uniq();
CSphString sError;
for ( const CSphString & sName : dIdx )
{
if ( !DropIndexInt ( sName, true, sError ) )
CompatWarning ( "%s", sError.cstr() );
}
if ( dIdx.GetLength() )
CompatWarning ( "dropped %d system tables", dIdx.GetLength() );
}
bool SetLogManagement ( const CSphString & sVal, CSphString & sError )
{
g_bEnabled = ( sVal=="on" || sVal=="1" || sVal=="dashboards" );
DropKbnTables();
if ( IsLogManagementEnabled() )
{
nljson tSys = GetSystemTable();
g_tKbnTable.update ( tSys );
CreateKbnIndexes();
CreateAliases();
}
return true;
}
bool IsLogManagementEnabled ()
{
return g_bEnabled;
}
HttpCompatBaseHandler_c::HttpCompatBaseHandler_c ( Str_t sBody, int iReqType, const SmallStringHash_T<CSphString> & hOpts )
: m_sBody ( sBody )
, m_iReqType ( iReqType )
, m_hOpts ( hOpts )
{
const CSphString & sEndpoint = m_hOpts["endpoint"];
m_dUrlParts = SplitURL ( sEndpoint );
}
HttpCompatHandler_c::HttpCompatHandler_c ( Str_t sBody, int iReqType, const SmallStringHash_T<CSphString> & hOpts )
: HttpCompatBaseHandler_c ( sBody, iReqType, hOpts )
{
}
bool HttpCompatHandler_c::Process()
{
CSphString sRefBody;
bool bDumpHttp = false;
if ( !m_sLogHttpFilter.IsEmpty() && sphWildcardMatch ( GetOptions()["endpoint"].cstr(), m_sLogHttpFilter.cstr() ) )
{
bDumpHttp = true;
sRefBody = GetBody();
}
bool bOk = ProcessEndpoints();
if ( !bOk || !m_sError.IsEmpty() )
{
bOk = false;
if ( m_sError.IsEmpty() )
FormatError ( EHTTP_STATUS::_501, "%s - unsupported endpoint", GetFullURL().cstr() );
COMPATINFO << m_sError.cstr();
}
if ( !bOk || bDumpHttp )
{
if ( !bOk )
DumpHttp ( GetRequestType(), GetFullURL(), GetBody() );
else
DumpHttp ( GetRequestType(), GetFullURL(), FromStr ( sRefBody ), m_dData );
}
ScopedMutex_t tLock ( m_tReqStatLock );
m_tReqStat.AddUnique ( GetFullURL() )++;
return bOk;
}
void HttpCompatHandler_c::SetLogFilter ( const CSphString & sVal )
{
if ( sVal=="dumpq" )
{
StringBuilder_c tOut ( "\n" );
ScopedMutex_t tLock ( m_tReqStatLock );
for ( const auto & tIt : m_tReqStat )
tOut.Appendf ( "%s, total = %d", tIt.first.cstr(), tIt.second );
tOut.Appendf ( "total %d\n", m_tReqStat.GetLength() );
COMPATINFO << tOut;
return;
}
m_sLogHttpFilter = sVal;
}
void HttpCompatBaseHandler_c::BuildReplyHead ( Str_t sRes, EHTTP_STATUS eStatus )
{
m_eHttpCode = eStatus;
HttpBuildReplyHead ( GetResult(), eStatus, sRes.first, sRes.second, IsHead() );
}
void HttpCompatHandler_c::EmptyReply()
{
const char * sRes = "{}";
BuildReplyHead ( FromSz ( sRes ), EHTTP_STATUS::_200 );
}
void HttpCompatHandler_c::ReportMissedIndex ( const CSphString & sIndex )
{
m_sError.SetSprintf ( "no such index [%s]", sIndex.cstr() );
ReportError ( nullptr, HttpErrorType_e::IndexNotFound, EHTTP_STATUS::_404, sIndex.cstr() );
}
void HttpCompatHandler_c::ReportIncorrectMethod ( const char * sAllowed )
{
m_sError.SetSprintf ( "Incorrect HTTP method for uri [%s] and method [%s], allowed: [%s]", GetFullURL().cstr(), http_method_str ( (http_method)GetRequestType() ), sAllowed );
ReportError ( nullptr, HttpErrorType_e::Unknown, EHTTP_STATUS::_405, nullptr );
}
void HttpCompatHandler_c::ReportMissedScript ( const CSphString & sIndex )
{
CompatWarning ( "missed script '%s' at '%s' body '%s'", sIndex.cstr(), GetFullURL().cstr(), GetBody().first );
ReportError ( "failed to execute script", HttpErrorType_e::IllegalArgument, EHTTP_STATUS::_400 );
}
CSphMutex HttpCompatHandler_c::m_tReqStatLock;
SmallStringHash_T<int> HttpCompatHandler_c::m_tReqStat;
CSphString HttpCompatHandler_c::m_sLogHttpFilter;
std::unique_ptr<HttpHandler_c> CreateCompatHandler ( Str_t sBody, int iReqType, const SmallStringHash_T<CSphString> & hOpts )
{
if ( IsLogManagementEnabled() )
return std::make_unique<HttpCompatHandler_c>( sBody, iReqType, hOpts );
return nullptr;
}
void SetLogHttpFilter ( const CSphString & sVal )
{
HttpCompatHandler_c::SetLogFilter ( sVal );
}
| 97,949 | C++ | .cpp | 3,115 | 28.516212 | 217 | 0.658355 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,911 | schematransform.cpp | manticoresoftware_manticoresearch/src/schematransform.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "schematransform.h"
#include "queuecreator.h"
#include "sphinxjson.h"
template <typename FN>
static void FnSortGetStringRemap ( const ISphSchema & tDstSchema, const ISphSchema & tSrcSchema, FN fnProcess )
{
for ( int i = 0; i<tDstSchema.GetAttrsCount (); ++i )
{
const CSphColumnInfo & tDst = tDstSchema.GetAttr ( i );
// remap only static strings
if ( tDst.m_eAttrType==SPH_ATTR_STRINGPTR || !IsSortStringInternal ( tDst.m_sName ) )
continue;
auto iSrcCol = tSrcSchema.GetAttrIndex ( tDst.m_sName.cstr ()+sizeof ( GetInternalAttrPrefix() )-1 );
if ( iSrcCol!=-1 ) // skip internal attributes received from agents
fnProcess ( iSrcCol, i );
}
}
int GetStringRemapCount ( const ISphSchema & tDstSchema, const ISphSchema & tSrcSchema )
{
int iMaps = 0;
FnSortGetStringRemap ( tDstSchema, tSrcSchema, [&iMaps] ( int, int ) { ++iMaps; } );
return iMaps;
}
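// unpacks a blob-packed null bitmask and rebuilds it against the destination schema,
// moving each set bit through the dNullRemap table (bits remapped to -1 are dropped)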
static BYTE * RepackNullMaskStr ( const BYTE * pMask, int iNumDstAttrs, const IntVec_t & dNullRemap )
{
ByteBlob_t dUnpacked = sphUnpackPtrAttr(pMask);
int iNumElements = dUnpacked.second*8;
BitVec_T<BYTE> tSrcMask ( (BYTE*)dUnpacked.first, iNumElements );
BitVec_T<BYTE> tDstMask ( iNumDstAttrs );
for ( int i = 0; i < iNumElements; i++ )
if ( tSrcMask.BitGet(i) && dNullRemap[i]!=-1 )
tDstMask.BitSet ( dNullRemap[i] );
return sphPackPtrAttr ( { tDstMask.Begin(), tDstMask.GetSizeBytes() } );
}
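// integer variant of the null-mask repack; e.g. with dNullRemap = { 2, -1, 0 } a source
// mask 0b101 (bits 0 and 2 set) yields 0b101 again: bit 0 -> bit 2, bit 2 -> bit 0,
// while a set bit 1 would be dropped because its remap entry is -1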
static SphAttr_t RepackNullMaskInt ( SphAttr_t uMask, const IntVec_t & dNullRemap )
{
SphAttr_t uValue = 0;
for ( int i = 0; i < dNullRemap.GetLength(); i++ )
if ( ( uMask & ( 1ULL << i ) ) && dNullRemap[i]!=-1 )
uValue |= 1ULL << dNullRemap[i];
return uValue;
}
static IntVec_t SetupNullMaskRemap ( const ISphSchema & tOldSchema, const ISphSchema & tNewSchema )
{
IntVec_t dNullRemap;
if ( !tOldSchema.GetAttr ( GetNullMaskAttrName() ) )
return dNullRemap;
for ( int i = 0; i < tOldSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tOldAttr = tOldSchema.GetAttr(i);
if ( tOldAttr.m_tLocator.m_bDynamic )
dNullRemap.Add ( tNewSchema.GetAttrIndex ( tOldAttr.m_sName.cstr() ) );
}
return dNullRemap;
}
void RemapNullMask ( VecTraits_T<CSphMatch> & dMatches, const CSphSchema & tOldSchema, CSphSchema & tNewSchema )
{
const CSphColumnInfo * pOld = tOldSchema.GetAttr ( GetNullMaskAttrName() );
if ( !pOld )
return;
const CSphColumnInfo * pNew = tNewSchema.GetAttr ( GetNullMaskAttrName() );
assert(pNew);
int iNumDynamicAttrs = tNewSchema.GetAttrsCount()-1; // one is null mask which we exclude
// we assume that we don't change null mask type
assert ( pNew->m_eAttrType==DetermineNullMaskType(iNumDynamicAttrs) );
IntVec_t dNullRemap = SetupNullMaskRemap ( tOldSchema, tNewSchema );
if ( pOld->m_eAttrType==SPH_ATTR_STRINGPTR )
{
for ( auto & i : dMatches )
{
BYTE * pOldMask = (BYTE *)i.GetAttr ( pOld->m_tLocator );
BYTE * pNewMask = RepackNullMaskStr ( pOldMask, iNumDynamicAttrs, dNullRemap );
SafeDeleteArray(pOldMask);
i.SetAttr ( pNew->m_tLocator, (SphAttr_t)pNewMask );
}
}
else
{
for ( auto & i : dMatches )
i.SetAttr ( pNew->m_tLocator, RepackNullMaskInt ( i.GetAttr ( pOld->m_tLocator ), dNullRemap ) );
}
}
//////////////////////////////////////////////////////////////////////////
TransformedSchemaBuilder_c::TransformedSchemaBuilder_c ( const ISphSchema & tOldSchema, CSphSchema & tNewSchema )
: m_tOldSchema ( tOldSchema )
, m_tNewSchema ( tNewSchema )
{}
void TransformedSchemaBuilder_c::AddAttr ( const CSphString & sName )
{
const CSphColumnInfo * pAttr = m_tOldSchema.GetAttr ( sName.cstr() );
if ( !pAttr )
return;
CSphColumnInfo tAttr = *pAttr;
tAttr.m_tLocator.Reset();
if ( tAttr.m_iIndex==-1 )
tAttr.m_iIndex = m_tOldSchema.GetAttrIndexOriginal ( tAttr.m_sName.cstr() );
// check if new columnar attributes were added (that were not in the select list originally)
if ( tAttr.IsColumnar() )
ReplaceColumnarAttrWithExpression ( tAttr, m_tNewSchema.GetAttrsCount() );
tAttr.m_eAttrType = sphPlainAttrToPtrAttr ( tAttr.m_eAttrType );
m_tNewSchema.AddAttr ( tAttr, true );
}
void TransformedSchemaBuilder_c::Finalize()
{
const CSphColumnInfo * pOld = m_tOldSchema.GetAttr ( GetNullMaskAttrName() );
if ( !pOld )
return;
const CSphColumnInfo * pNew = m_tNewSchema.GetAttr ( GetNullMaskAttrName() );
assert(!pNew);
CSphColumnInfo tAttr ( GetNullMaskAttrName(), DetermineNullMaskType ( m_tNewSchema.GetAttrsCount() ) );
m_tNewSchema.AddAttr ( tAttr, true );
}
void TransformedSchemaBuilder_c::ReplaceColumnarAttrWithExpression ( CSphColumnInfo & tAttr, int iLocator )
{
assert ( tAttr.IsColumnar() );
assert ( !tAttr.m_pExpr );
// temporarily add the attr to the new schema;
// when the result set is finalized, the corresponding columnar expression (spawned later)
// will be evaluated and its value put into the match,
// and this expression will be used to fetch that value
tAttr.m_uAttrFlags &= ~CSphColumnInfo::ATTR_COLUMNAR;
tAttr.m_eAttrType = sphPlainAttrToPtrAttr ( tAttr.m_eAttrType );
m_tNewSchema.AddAttr ( tAttr, true );
// parse expression as if it is not columnar
CSphString sError;
ExprParseArgs_t tExprArgs;
tAttr.m_pExpr = sphExprParse ( tAttr.m_sName.cstr(), m_tNewSchema, nullptr, sError, tExprArgs );
assert ( tAttr.m_pExpr );
// now remove it from schema (it will be added later with the supplied expression)
m_tNewSchema.RemoveAttr( tAttr.m_sName.cstr(), true );
}
//////////////////////////////////////////////////////////////////////////
class MatchesToNewSchema_c : public MatchProcessor_i
{
public:
MatchesToNewSchema_c ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema, GetBlobPoolFromMatch_fn fnGetBlobPool, GetColumnarFromMatch_fn fnGetColumnar );
// performs the actual processing according to the created plan
void Process ( CSphMatch * pMatch ) final { ProcessMatch(pMatch); }
void Process ( VecTraits_T<CSphMatch *> & dMatches ) final { dMatches.for_each ( [this]( CSphMatch * pMatch ){ ProcessMatch(pMatch); } ); }
bool ProcessInRowIdOrder() const final { return m_dActions.any_of ( []( const MapAction_t & i ){ return i.IsExprEval(); } ); }
private:
struct MapAction_t
{
// what is to do with current position
enum Action_e
{
SETZERO, // set default (0)
COPY, // copy as is (plain attribute)
COPYBLOB, // deep copy (unpack/pack) the blob
COPYJSONFIELD, // json field (packed blob with type)
EVALEXPR_INT, // evaluate the expression for the recently added int attribute
EVALEXPR_BIGINT, // evaluate the expression for the recently added bigint attribute
EVALEXPR_STR, // evaluate the expression for the recently added string attribute
EVALEXPR_MVA, // evaluate the expression for the recently added mva attribute
NULLMASK_INT2INT, // repack null attribute mask for the new schema
NULLMASK_STR2INT, // repack null attribute mask for the new schema
NULLMASK_INT2STR, // repack null attribute mask for the new schema
NULLMASK_STR2STR // repack null attribute mask for the new schema
};
const CSphAttrLocator * m_pFrom;
const CSphAttrLocator * m_pTo;
ISphExprRefPtr_c m_pExpr;
Action_e m_eAction;
mutable columnar::Columnar_i * m_pPrevColumnar = nullptr;
bool IsExprEval() const
{
return m_eAction==EVALEXPR_INT || m_eAction==EVALEXPR_BIGINT || m_eAction==EVALEXPR_STR || m_eAction==EVALEXPR_MVA;
}
};
int m_iDynamicSize; // target dynamic size, from schema
int m_iNumDstAttrs = 0; // num attrs in dst schema
CSphVector<MapAction_t> m_dActions; // the recipe
CSphVector<std::pair<CSphAttrLocator, CSphAttrLocator>> m_dRemapCmp; // remap @int_attr_ATTR -> ATTR
CSphVector<int> m_dNullRemap; // attr remap for null bitmaps
CSphVector<int> m_dDataPtrAttrs; // orphaned attrs we have to free before swap to new attr
GetBlobPoolFromMatch_fn m_fnGetBlobPool; // provides base for pool copying
GetColumnarFromMatch_fn m_fnGetColumnar; // columnar storage getter
static void SetupAction ( const CSphColumnInfo & tOld, const CSphColumnInfo & tNew, const ISphSchema * pOldSchema, MapAction_t & tAction );
FORCE_INLINE void ProcessMatch ( CSphMatch * pMatch );
FORCE_INLINE void PerformAction ( const MapAction_t & tAction, CSphMatch * pMatch, CSphMatch & tResult, const BYTE * pBlobPool, columnar::Columnar_i * pColumnar );
};
MatchesToNewSchema_c::MatchesToNewSchema_c ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema, GetBlobPoolFromMatch_fn fnGetBlobPool, GetColumnarFromMatch_fn fnGetColumnar )
: m_iDynamicSize ( pNewSchema->GetDynamicSize () )
, m_fnGetBlobPool ( std::move ( fnGetBlobPool ) )
, m_fnGetColumnar ( std::move ( fnGetColumnar ) )
{
assert ( pOldSchema && pNewSchema );
// prepare transforming recipe
// initial state: set all new columns to be reset by default
for ( int i = 0; i<pNewSchema->GetAttrsCount(); ++i )
m_dActions.Add ( { nullptr, &pNewSchema->GetAttr(i).m_tLocator, nullptr, MapAction_t::SETZERO } );
// add mapping from old to new according to column type
for ( int i = 0; i<pOldSchema->GetAttrsCount(); ++i )
{
const CSphColumnInfo & tOld = pOldSchema->GetAttr(i);
auto iNewIdx = pNewSchema->GetAttrIndex ( tOld.m_sName.cstr () );
if ( iNewIdx == -1 )
{
// dataptr present in old, but not in the new - mark it for releasing
if ( sphIsDataPtrAttr ( tOld.m_eAttrType ) && tOld.m_tLocator.m_bDynamic )
m_dDataPtrAttrs.Add( tOld.m_tLocator.m_iBitOffset >> ROWITEM_SHIFT );
continue;
}
const CSphColumnInfo & tNew = pNewSchema->GetAttr(iNewIdx);
auto & tAction = m_dActions[iNewIdx];
SetupAction ( tOld, tNew, pOldSchema, tAction );
}
// need to update @int_attr_ locator to use new schema
// no need to pass pOldSchema as we remap only new schema pointers
// also need to update group sorter keypart to be str_ptr in caller code SetSchema
FnSortGetStringRemap ( *pNewSchema, *pNewSchema, [this, pNewSchema] ( int iSrc, int iDst )
{
m_dRemapCmp.Add ( { pNewSchema->GetAttr(iSrc).m_tLocator, pNewSchema->GetAttr(iDst).m_tLocator } );
} );
m_dNullRemap = SetupNullMaskRemap ( *pOldSchema, *pNewSchema );
m_iNumDstAttrs = pNewSchema->GetAttrsCount();
}
void MatchesToNewSchema_c::SetupAction ( const CSphColumnInfo & tOld, const CSphColumnInfo & tNew, const ISphSchema * pOldSchema, MapAction_t & tAction )
{
tAction.m_pFrom = &tOld.m_tLocator;
if ( tOld.m_sName==GetNullMaskAttrName() )
{
bool bOldInt = tOld.m_eAttrType==SPH_ATTR_INTEGER || tOld.m_eAttrType==SPH_ATTR_BIGINT;
bool bNewInt = tNew.m_eAttrType==SPH_ATTR_INTEGER || tNew.m_eAttrType==SPH_ATTR_BIGINT;
if ( bOldInt )
tAction.m_eAction = bNewInt ? MapAction_t::NULLMASK_INT2INT : MapAction_t::NULLMASK_INT2STR;
else
tAction.m_eAction = bNewInt ? MapAction_t::NULLMASK_STR2INT : MapAction_t::NULLMASK_STR2STR;
return;
}
// columnar attr replaced by an expression
// we now need to create an expression that fetches data from columnar storage
if ( tOld.IsColumnar() && tNew.m_pExpr )
{
CSphString sError;
ExprParseArgs_t tExprArgs;
tAction.m_pExpr = sphExprParse ( tOld.m_sName.cstr(), *pOldSchema, nullptr, sError, tExprArgs );
assert ( tAction.m_pExpr );
switch ( tNew.m_eAttrType )
{
case SPH_ATTR_STRINGPTR: tAction.m_eAction = MapAction_t::EVALEXPR_STR; break;
case SPH_ATTR_BIGINT: tAction.m_eAction = MapAction_t::EVALEXPR_BIGINT; break;
case SPH_ATTR_UINT32SET_PTR:
case SPH_ATTR_INT64SET_PTR:
case SPH_ATTR_FLOAT_VECTOR_PTR: tAction.m_eAction = MapAction_t::EVALEXPR_MVA; break;
default: tAction.m_eAction = MapAction_t::EVALEXPR_INT; break;
}
return;
}
// same type - just copy attr as is
if ( tOld.m_eAttrType==tNew.m_eAttrType )
{
tAction.m_eAction = MapAction_t::COPY;
return;
}
assert ( !sphIsDataPtrAttr ( tOld.m_eAttrType ) && sphIsDataPtrAttr ( tNew.m_eAttrType ) );
if ( tOld.m_eAttrType==SPH_ATTR_JSON_FIELD )
tAction.m_eAction = MapAction_t::COPYJSONFIELD;
else
tAction.m_eAction = MapAction_t::COPYBLOB;
}
void MatchesToNewSchema_c::ProcessMatch ( CSphMatch * pMatch )
{
CSphMatch tResult;
tResult.Reset ( m_iDynamicSize );
const BYTE * pBlobPool = m_fnGetBlobPool(pMatch);
columnar::Columnar_i * pColumnar = m_fnGetColumnar(pMatch);
for ( const auto & tAction : m_dActions )
PerformAction ( tAction, pMatch, tResult, pBlobPool, pColumnar );
// remap comparator attributes
for ( const auto & tRemap : m_dRemapCmp )
tResult.SetAttr ( tRemap.second, tResult.GetAttr ( tRemap.first ) );
// free original orphaned pointers
CSphSchemaHelper::FreeDataSpecial ( *pMatch, m_dDataPtrAttrs );
Swap ( pMatch->m_pDynamic, tResult.m_pDynamic );
pMatch->m_pStatic = nullptr;
}
void MatchesToNewSchema_c::PerformAction ( const MapAction_t & tAction, CSphMatch * pMatch, CSphMatch & tResult, const BYTE * pBlobPool, columnar::Columnar_i * pColumnar )
{
// try to minimize columnar switches inside the expression as this leads to recreating iterators
if ( tAction.IsExprEval() && pColumnar!=tAction.m_pPrevColumnar )
{
tAction.m_pExpr->Command ( SPH_EXPR_SET_COLUMNAR, (void*)pColumnar );
tAction.m_pPrevColumnar = pColumnar;
}
SphAttr_t uValue = 0;
switch ( tAction.m_eAction )
{
case MapAction_t::SETZERO:
break;
case MapAction_t::COPY:
uValue = pMatch->GetAttr ( *tAction.m_pFrom );
break;
case MapAction_t::COPYBLOB:
{
auto dBlob = sphGetBlobAttr ( *pMatch, *tAction.m_pFrom, pBlobPool );
uValue = (SphAttr_t) sphPackPtrAttr ( dBlob );
}
break;
case MapAction_t::COPYJSONFIELD:
{
SphAttr_t uPacked = pMatch->GetAttr ( *tAction.m_pFrom );
const BYTE * pStr = uPacked ? pBlobPool+sphJsonUnpackOffset ( uPacked ) : nullptr;
ESphJsonType eJson = sphJsonUnpackType ( uPacked );
if ( pStr && eJson!=JSON_NULL )
{
int iLengthBytes = sphJsonNodeSize ( eJson, pStr );
BYTE * pData = nullptr;
uValue = (SphAttr_t) sphPackPtrAttr ( iLengthBytes+1, &pData );
// store field type before the field
*pData = (BYTE) eJson;
memcpy ( pData+1, pStr, iLengthBytes );
}
}
break;
case MapAction_t::EVALEXPR_INT:
uValue = (SphAttr_t)tAction.m_pExpr->IntEval(*pMatch);
break;
case MapAction_t::EVALEXPR_BIGINT:
uValue = (SphAttr_t)tAction.m_pExpr->Int64Eval(*pMatch);
break;
case MapAction_t::EVALEXPR_STR:
uValue = (SphAttr_t)tAction.m_pExpr->StringEvalPacked(*pMatch);
break;
case MapAction_t::EVALEXPR_MVA:
uValue = (SphAttr_t)tAction.m_pExpr->Int64Eval(*pMatch);
break;
case MapAction_t::NULLMASK_INT2INT:
{
SphAttr_t uSrcValue = pMatch->GetAttr ( *tAction.m_pFrom );
for ( int i = 0; i < m_dNullRemap.GetLength(); i++ )
if ( ( uSrcValue & ( 1ULL << i ) ) && m_dNullRemap[i]!=-1 )
uValue |= 1ULL << m_dNullRemap[i];
}
break;
case MapAction_t::NULLMASK_STR2INT:
{
ByteBlob_t dUnpacked = sphUnpackPtrAttr ( (const BYTE*)pMatch->GetAttr ( *tAction.m_pFrom ) );
int iNumElements = dUnpacked.second*8;
BitVec_T<BYTE> tSrcMask ( (BYTE*)dUnpacked.first, iNumElements );
for ( int i = 0; i < iNumElements; i++ )
if ( tSrcMask.BitGet(i) && m_dNullRemap[i]!=-1 )
uValue |= 1ULL << m_dNullRemap[i];
}
break;
case MapAction_t::NULLMASK_INT2STR:
{
SphAttr_t uSrcValue = pMatch->GetAttr ( *tAction.m_pFrom );
BitVec_T<BYTE> tDstMask ( m_iNumDstAttrs );
for ( int i = 0; i < m_dNullRemap.GetLength(); i++ )
if ( ( uSrcValue & ( 1ULL << i ) ) && m_dNullRemap[i]!=-1 )
tDstMask.BitSet ( m_dNullRemap[i] );
uValue = (SphAttr_t)sphPackPtrAttr ( { tDstMask.Begin(), tDstMask.GetSizeBytes() } );
}
break;
case MapAction_t::NULLMASK_STR2STR:
uValue = (SphAttr_t)RepackNullMaskStr ( (const BYTE*)pMatch->GetAttr ( *tAction.m_pFrom ), m_iNumDstAttrs, m_dNullRemap );
break;
default:
assert(false && "Unknown state");
}
tResult.SetAttr ( *tAction.m_pTo, uValue );
}
///////////////////////////////////////////////////////////////////////////////
MatchProcessor_i * CreateMatchSchemaTransform ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema, GetBlobPoolFromMatch_fn fnGetBlobPool, GetColumnarFromMatch_fn fnGetColumnar )
{
return new MatchesToNewSchema_c ( pOldSchema, pNewSchema, fnGetBlobPool, fnGetColumnar );
}
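// typical usage sketch (illustrative, not from this file): construct the processor with
// blob-pool and columnar getters captured from the owning result set, then feed matches:
//   MatchProcessor_i * pProc = CreateMatchSchemaTransform ( pOldSchema, pNewSchema, fnBlobs, fnColumnar );
//   for ( auto & tMatch : dMatches )
//       pProc->Process ( &tMatch );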
| 16,607 | C++ | .cpp | 379 | 41.23219 | 188 | 0.720441 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,912 | cjkpreprocessor.cpp | manticoresoftware_manticoresearch/src/cjkpreprocessor.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "cjkpreprocessor.h"
#include "sphinxint.h"
#include "tokenizer/charset_definition_parser.h"
#include "tokenizer/tokenizer.h"
bool CJKPreprocessor_c::Process ( const BYTE * pBuffer, int iLength, CSphVector<BYTE> & dOut, bool bQuery )
{
if ( !pBuffer || !iLength )
return false;
if ( !sphDetectChinese ( pBuffer, iLength ) )
return false;
dOut.Resize(0);
const BYTE * pBufferMax = pBuffer+iLength;
bool bWasChineseCode = false;
const BYTE * pChunkStart = pBuffer;
bool bFirstCode = true;
while ( pBuffer<pBufferMax )
{
const BYTE * pTmp = pBuffer;
int iCode = sphUTF8Decode ( pBuffer );
bool bIsChineseCode = sphIsChineseCode(iCode);
if ( !bFirstCode && bWasChineseCode!=bIsChineseCode )
{
AddTextChunk ( pChunkStart, int ( pTmp-pChunkStart ), dOut, bWasChineseCode, bQuery );
pChunkStart = pTmp;
}
bWasChineseCode = bIsChineseCode;
bFirstCode = false;
}
AddTextChunk ( pChunkStart, int ( pBuffer-pChunkStart ), dOut, bWasChineseCode, bQuery );
return true;
}
bool CJKPreprocessor_c::SetBlendChars ( const char * szBlendChars, CSphString & sError )
{
return sphParseCharset ( szBlendChars, m_dBlendChars, &sError );
}
void CJKPreprocessor_c::AddTextChunk ( const BYTE * pStart, int iLen, CSphVector<BYTE> & dOut, bool bChinese, bool bQuery )
{
if ( !iLen )
return;
if ( bChinese )
{
ProcessBuffer ( pStart, iLen );
const BYTE * pToken;
int iTokenLen = 0;
while ( (pToken = GetNextToken(iTokenLen))!=nullptr )
{
bool bAddSpace = NeedAddSpace ( pToken, dOut, bQuery );
BYTE * pOut = dOut.AddN ( iTokenLen + ( bAddSpace ? 1 : 0 ) );
if ( bAddSpace )
*pOut++ = ' ';
memcpy ( pOut, pToken, iTokenLen );
}
}
else
{
bool bAddSpace = NeedAddSpace ( pStart, dOut, bQuery );
BYTE * pOut = dOut.AddN ( iLen + ( bAddSpace ? 1 : 0 ) );
if ( bAddSpace )
*pOut++ = ' ';
memcpy ( pOut, pStart, iLen );
}
}
//////////////////////////////////////////////////////////////////////////
FieldFilterCJK_c::FieldFilterCJK_c ( std::unique_ptr<CJKPreprocessor_c> pPreprocessor )
: m_pPreprocessor ( std::move(pPreprocessor) )
{}
bool FieldFilterCJK_c::Init ( CSphString & sError )
{
return m_pPreprocessor->Init(sError);
}
int FieldFilterCJK_c::Apply ( const BYTE * sField, int iLength, CSphVector<BYTE> & dStorage, bool bQuery )
{
if ( m_pParent )
{
int iResultLength = m_pParent->Apply ( sField, iLength, dStorage, bQuery );
if ( iResultLength ) // can't use dStorage.GetLength() because of the safety gap
{
CSphFixedVector<BYTE> dTmp ( iResultLength );
memcpy ( dTmp.Begin(), dStorage.Begin(), iResultLength ); // copy only the meaningful bytes, not the safety gap
if ( !m_pPreprocessor->Process ( dTmp.Begin(), iResultLength, dStorage, bQuery ) )
return iResultLength;
// add safety gap
int iStorageLength = dStorage.GetLength();
if ( iStorageLength )
{
dStorage.Resize ( iStorageLength+4 );
dStorage[iStorageLength]='\0';
}
return iStorageLength;
}
}
if ( !m_pPreprocessor->Process ( sField, iLength, dStorage, bQuery ) )
return 0;
int iStorageLength = dStorage.GetLength();
*dStorage.AddN(4) = '\0';
return iStorageLength;
}
void FieldFilterCJK_c::GetSettings ( CSphFieldFilterSettings & tSettings ) const
{
if ( m_pParent )
m_pParent->GetSettings ( tSettings );
}
std::unique_ptr<ISphFieldFilter> CreateFilterCJK ( std::unique_ptr<ISphFieldFilter> pParent, std::unique_ptr<CJKPreprocessor_c> pPreprocessor, const char * szBlendChars, CSphString & sError )
{
auto pFilter = std::make_unique<FieldFilterCJK_c>( std::move(pPreprocessor) );
if ( !pFilter->Init(sError) )
return pParent;
if ( szBlendChars && *szBlendChars && !pFilter->SetBlendChars ( szBlendChars, sError ) )
return pParent;
pFilter->Setup ( std::move(pParent) );
return pFilter;
}
std::unique_ptr<ISphFieldFilter> FieldFilterCJK_c::Clone ( const FieldFilterOptions_t * pOptions ) const
{
std::unique_ptr<ISphFieldFilter> pClonedParent { m_pParent ? m_pParent->Clone(pOptions) : nullptr };
std::unique_ptr<CJKPreprocessor_c> pClonedPreprocessor { m_pPreprocessor->Clone(pOptions) };
CSphString sError;
auto pFilter = CreateFilterCJK ( std::move(pClonedParent), std::move(pClonedPreprocessor), m_sBlendChars.cstr(), sError );
if ( !pFilter )
sphWarning ( "ICU filter clone error '%s'", sError.cstr() );
return pFilter;
}
bool FieldFilterCJK_c::SetBlendChars ( const char * szBlendChars, CSphString & sError )
{
m_sBlendChars = szBlendChars;
return m_pPreprocessor->SetBlendChars ( szBlendChars, sError );
}
void FieldFilterCJK_c::Setup ( std::unique_ptr<ISphFieldFilter> pParent )
{
m_pParent = std::move(pParent);
}
//////////////////////////////////////////////////////////////////////
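// sanity check: when CJK preprocessing is enabled, verify that at least half of the
// codepoints covered by the built-in 'cjk' charset alias are mapped in charset_table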
bool CheckTokenizerCJK ( CSphIndexSettings & tSettings, const CSphTokenizerSettings & tTokSettings, CSphString & sError )
{
if ( tSettings.m_ePreprocessor==Preprocessor_e::NONE )
return true;
StrVec_t dWarnings;
TokenizerRefPtr_c pTokenizer = Tokenizer::Create ( tTokSettings, nullptr, nullptr, dWarnings, sError );
if ( !pTokenizer.Ptr() )
return false;
const CSphLowercaser & tLC = pTokenizer->GetLowercaser();
const CharsetAlias_t * pCJKAlias = nullptr;
for ( const auto & i : GetCharsetAliases() )
if ( i.m_sName=="cjk" )
pCJKAlias = &i;
if ( !pCJKAlias )
return true;
int iFound = 0;
int iTotal = 0;
for ( const auto & i : pCJKAlias->m_dRemaps )
for ( int iCode = i.m_iStart; iCode<=i.m_iEnd; iCode++ )
{
if ( tLC.ToLower(iCode) )
iFound++;
iTotal++;
}
float fRatio = float(iFound)/iTotal;
if ( fRatio < 0.5f )
{
sError = "CJK segmentation turned on, check that you have CJK characters in charset_table";
return false;
}
return true;
}
| 6,173 | C++ | .cpp | 175 | 32.725714 | 191 | 0.70515 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,913 | querycontext.cpp | manticoresoftware_manticoresearch/src/querycontext.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "querycontext.h"
#include "sphinxint.h"
#include "sphinxsearch.h"
#include "sphinxsort.h"
#include "docstore.h"
static FORCE_INLINE void FreeDataPtrAttrs ( CSphMatch & tMatch, const CSphVector<ContextCalcItem_t> & dItems, const IntVec_t & dItemIndexes )
{
if ( !tMatch.m_pDynamic )
return;
for ( auto i : dItemIndexes )
{
const auto & tItem = dItems[i];
BYTE * pData = (BYTE *)tMatch.GetAttr ( tItem.m_tLoc );
// packed values must be released via sphDeallocatePacked(), not delete[]
if ( pData )
{
sphDeallocatePacked ( pData );
tMatch.SetAttr ( tItem.m_tLoc, 0 );
}
}
}
static ESphEvalStage GetEarliestStage ( ESphEvalStage eStage, const CSphColumnInfo & tIn, const CSphVector<const ISphSchema *> & dSchemas )
{
for ( const auto * pSchema : dSchemas )
{
const CSphColumnInfo * pCol = pSchema->GetAttr ( tIn.m_sName.cstr() );
if ( !pCol || ( pCol->IsColumnar() && pCol->m_eStage==SPH_EVAL_STATIC ) )
continue;
eStage = Min ( eStage, pCol->m_eStage );
}
return eStage;
}
///////////////////////////////////////////////////////////////////////////////
void CSphQueryContext::ResetFilters()
{
m_pFilter.reset();
m_pWeightFilter.reset();
m_dUserVals.Reset();
}
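// weight resolution: all fields default to 1; field_weights entries bind by name (unknown
// names are collected into a warning); otherwise the legacy ordered weight list applies.
// E.g. with fields (title, body) and field_weights=(title=5) the result is (5, 1).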
void CSphQueryContext::BindWeights ( const CSphQuery & tQuery, const CSphSchema & tSchema, CSphString & sWarning )
{
const int HEAVY_FIELDS = SPH_MAX_FIELDS;
// defaults
m_iWeights = Min ( tSchema.GetFieldsCount(), HEAVY_FIELDS );
for ( int i=0; i<m_iWeights; ++i )
m_dWeights[i] = 1;
// name-bound weights
CSphString sFieldsNotFound;
if ( !tQuery.m_dFieldWeights.IsEmpty() )
{
for ( auto& tWeight : tQuery.m_dFieldWeights )
{
int j = tSchema.GetFieldIndex ( tWeight.first.cstr() );
if ( j<0 )
{
if ( sFieldsNotFound.IsEmpty() )
sFieldsNotFound = tWeight.first;
else
sFieldsNotFound.SetSprintf ( "%s %s", sFieldsNotFound.cstr(), tWeight.first.cstr() );
}
if ( j>=0 && j<HEAVY_FIELDS )
m_dWeights[j] = tWeight.second;
}
if ( !sFieldsNotFound.IsEmpty() )
sWarning.SetSprintf ( "Fields specified in field_weights option not found: [%s]", sFieldsNotFound.cstr() );
return;
}
// order-bound weights
if ( !tQuery.m_dWeights.IsEmpty() )
{
for ( int i=0, iLim=Min ( m_iWeights, tQuery.m_dWeights.GetLength() ); i<iLim; ++i )
m_dWeights[i] = (int) tQuery.m_dWeights[i];
}
}
bool CSphQueryContext::SetupCalc ( CSphQueryResultMeta & tMeta, const ISphSchema & tInSchema, const ISphSchema & tSchema, const BYTE * pBlobPool, const columnar::Columnar_i * pColumnar, const CSphVector<const ISphSchema *> & dInSchemas )
{
m_dCalcFilter.Resize(0);
m_dCalcSort.Resize(0);
m_dCalcFinal.Resize(0);
m_dCalcFilterPtrAttrs.Resize(0);
m_dCalcSortPtrAttrs.Resize(0);
// quickly verify that all my real attributes can be stashed there
if ( tInSchema.GetAttrsCount() < tSchema.GetAttrsCount() )
{
tMeta.m_sError.SetSprintf ( "INTERNAL ERROR: incoming-schema mismatch (incount=%d, mycount=%d)",
tInSchema.GetAttrsCount(), tSchema.GetAttrsCount() );
return false;
}
// now match everyone
for ( int iIn=0; iIn<tInSchema.GetAttrsCount(); iIn++ )
{
const CSphColumnInfo & tIn = tInSchema.GetAttr(iIn);
// recalculate stage as sorters set column at earlier stage
// FIXME!!! should we update column?
ESphEvalStage eStage = GetEarliestStage ( tIn.m_eStage, tIn, dInSchemas );
switch ( eStage )
{
case SPH_EVAL_STATIC:
{
// this check may significantly slow down queries with huge schema attribute count
#ifndef NDEBUG
const CSphColumnInfo * pMy = tSchema.GetAttr ( tIn.m_sName.cstr() );
if ( !pMy )
{
tMeta.m_sError.SetSprintf ( "INTERNAL ERROR: incoming-schema attr missing from table-schema (in=%s)",
sphDumpAttr(tIn).cstr() );
return false;
}
// static; check for full match
if (!( tIn==*pMy ))
{
assert ( 0 );
tMeta.m_sError.SetSprintf ( "INTERNAL ERROR: incoming-schema mismatch (in=%s, my=%s)",
sphDumpAttr(tIn).cstr(), sphDumpAttr(*pMy).cstr() );
return false;
}
#endif
break;
}
case SPH_EVAL_PREFILTER:
case SPH_EVAL_PRESORT:
case SPH_EVAL_FINAL:
{
ISphExprRefPtr_c pExpr { tIn.m_pExpr };
if ( !pExpr )
{
tMeta.m_sError.SetSprintf ( "INTERNAL ERROR: incoming-schema expression missing evaluator (stage=%d, in=%s)",
(int)eStage, sphDumpAttr(tIn).cstr() );
return false;
}
// an expression that index/searcher should compute
ContextCalcItem_t tCalc;
tCalc.m_eType = tIn.m_eAttrType;
tCalc.m_tLoc = tIn.m_tLocator;
tCalc.m_pExpr = std::move(pExpr);
tCalc.m_pExpr->Command ( SPH_EXPR_SET_BLOB_POOL, (void*)pBlobPool );
tCalc.m_pExpr->Command ( SPH_EXPR_SET_COLUMNAR, (void*)pColumnar );
switch ( eStage )
{
case SPH_EVAL_PREFILTER: AddToFilterCalc(tCalc); break;
case SPH_EVAL_PRESORT: AddToSortCalc(tCalc); break;
case SPH_EVAL_FINAL: m_dCalcFinal.Add(tCalc); break;
default: break;
}
break;
}
case SPH_EVAL_SORTER:
// the sorter says it will compute this itself, so just skip it
case SPH_EVAL_POSTLIMIT:
break;
default:
tMeta.m_sError.SetSprintf ( "INTERNAL ERROR: unhandled eval stage=%d", (int)eStage );
return false;
}
}
// ok, we can emit matches in this schema (incoming for sorter, outgoing for index/searcher)
return true;
}
bool CSphQueryContext::CreateFilters ( CreateFilterContext_t & tCtx, CSphString & sError, CSphString & sWarning )
{
if ( !tCtx.m_pFilters || tCtx.m_pFilters->IsEmpty () )
return true;
if ( !sphCreateFilters ( tCtx, sError, sWarning ) )
return false;
m_pFilter = std::move ( tCtx.m_pFilter );
m_pWeightFilter = std::move ( tCtx.m_pWeightFilter );
m_dUserVals.SwapData ( tCtx.m_dUserVals );
return true;
}
void CSphQueryContext::AddToFilterCalc ( const ContextCalcItem_t & tCalc )
{
m_dCalcFilter.Add(tCalc);
if ( sphIsDataPtrAttr ( tCalc.m_eType ) )
m_dCalcFilterPtrAttrs.Add ( m_dCalcFilter.GetLength()-1 );
}
void CSphQueryContext::AddToSortCalc ( const ContextCalcItem_t & tCalc )
{
m_dCalcSort.Add(tCalc);
if ( sphIsDataPtrAttr ( tCalc.m_eType ) )
m_dCalcSortPtrAttrs.Add ( m_dCalcSort.GetLength()-1 );
}
void CSphQueryContext::FreeDataFilter ( CSphMatch & tMatch ) const
{
FreeDataPtrAttrs ( tMatch, m_dCalcFilter, m_dCalcFilterPtrAttrs );
}
void CSphQueryContext::FreeDataSort ( CSphMatch & tMatch ) const
{
FreeDataPtrAttrs ( tMatch, m_dCalcSort, m_dCalcSortPtrAttrs );
}
void CSphQueryContext::ExprCommand ( ESphExprCommand eCmd, void * pArg )
{
ARRAY_FOREACH ( i, m_dCalcFilter )
m_dCalcFilter[i].m_pExpr->Command ( eCmd, pArg );
ARRAY_FOREACH ( i, m_dCalcSort )
m_dCalcSort[i].m_pExpr->Command ( eCmd, pArg );
ARRAY_FOREACH ( i, m_dCalcFinal )
m_dCalcFinal[i].m_pExpr->Command ( eCmd, pArg );
}
void CSphQueryContext::SetBlobPool ( const BYTE * pBlobPool )
{
ExprCommand ( SPH_EXPR_SET_BLOB_POOL, (void*)pBlobPool );
if ( m_pFilter )
m_pFilter->SetBlobStorage ( pBlobPool );
if ( m_pWeightFilter )
m_pWeightFilter->SetBlobStorage ( pBlobPool );
}
void CSphQueryContext::SetColumnar ( const columnar::Columnar_i * pColumnar )
{
ExprCommand ( SPH_EXPR_SET_COLUMNAR, (void*)pColumnar );
}
void CSphQueryContext::SetDocstore ( const Docstore_i * pDocstore, int64_t iDocstoreSessionId )
{
DocstoreSession_c::InfoRowID_t tSessionInfo;
tSessionInfo.m_pDocstore = pDocstore;
tSessionInfo.m_iSessionId = iDocstoreSessionId;
ExprCommand ( SPH_EXPR_SET_DOCSTORE_ROWID, &tSessionInfo );
}
/// FIXME, perhaps
/// this rather crappy helper class really serves exactly 1 (one) simple purpose
///
/// it passes a sorting queue internals (namely, weight and float sortkey, if any,
/// of the current-worst queue element) to the MIN_TOP_WORST() and MIN_TOP_SORTVAL()
/// expression classes that expose those to the cruel outside world
///
/// all the COM-like EXTRA_xxx message back and forth is needed because expressions
/// are currently parsed and created earlier than the sorting queue
///
/// that also is the reason why we mischievously return 0 instead of clearly failing
/// with an error when the sortval is not a dynamic float; by the time we are parsing
/// expressions, we do not *yet* know that; but by the time we create a sorting queue,
/// we do not *want* to leak select expression checks into it
///
/// alternatively, we probably want to refactor this and introduce Bind(), to parse
/// expressions once, then bind them to actual searching contexts (aka index or segment,
/// and ranker, and sorter, and whatever else might be referenced by the expressions)
struct ContextExtra final : public ISphExtra
{
ISphRanker * m_pRanker;
ISphMatchSorter * m_pSorter;
ContextExtra ( ISphRanker* pRanker, ISphMatchSorter* pSorter)
: m_pRanker ( pRanker )
, m_pSorter ( pSorter )
{}
bool ExtraDataImpl ( ExtraData_e eData, void ** ppArg ) final
{
if ( eData!=EXTRA_GET_QUEUE_WORST && eData!=EXTRA_GET_QUEUE_SORTVAL )
return m_pRanker->ExtraData ( eData, ppArg );
if ( !m_pSorter )
return false;
const CSphMatch * pWorst = m_pSorter->GetWorst();
if ( !pWorst )
return false;
if ( eData==EXTRA_GET_QUEUE_WORST )
{
*ppArg = (void*)pWorst;
return true;
}
assert ( eData==EXTRA_GET_QUEUE_SORTVAL );
const CSphMatchComparatorState & tCmp = m_pSorter->GetState();
if ( tCmp.m_eKeypart[0]==SPH_KEYPART_FLOAT && tCmp.m_tLocator[0].m_bDynamic
&& tCmp.m_tLocator[0].m_iBitCount==32 && ( tCmp.m_tLocator[0].m_iBitOffset%32==0 )
&& tCmp.m_dAttrs[1]==-1 )
{
*(int*)ppArg = tCmp.m_tLocator[0].m_iBitOffset/32;
return true;
}
// min_top_sortval() only works with order by float_expr for now
return false;
}
};
void CSphQueryContext::SetupExtraData ( ISphRanker * pRanker, ISphMatchSorter * pSorter )
{
ContextExtra tExtra ( pRanker, pSorter );
ExprCommand ( SPH_EXPR_SET_EXTRA_DATA, &tExtra );
}
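// A hedged usage sketch (illustrative, not part of the original source): an
// expression that handles SPH_EXPR_SET_EXTRA_DATA can pull the current-worst
// queue element through the ContextExtra bridge above. The class and member
// names below are made up for the example.
#if 0
void Expr_QueueWorstExample_c::Command ( ESphExprCommand eCmd, void * pArg )
{
	if ( eCmd==SPH_EXPR_SET_EXTRA_DATA )
	{
		// ContextExtra answers EXTRA_GET_QUEUE_WORST itself and forwards
		// all other requests to the ranker
		const CSphMatch * pWorst = nullptr;
		( (ISphExtra*)pArg )->ExtraData ( EXTRA_GET_QUEUE_WORST, (void**)&pWorst );
		m_pWorst = pWorst; // may legitimately stay null while the queue is empty
	}
}
#endif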
| 10,254 | C++ | .cpp | 286 | 33.157343 | 237 | 0.712987 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,914 | taskmalloctrim.cpp | manticoresoftware_manticoresearch/src/taskmalloctrim.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "taskmalloctrim.h"
#if HAVE_MALLOC_TRIM
#include <malloc.h>
#include "searchdtask.h"
int PerformMallocTrim ( size_t iPad )
{
return malloc_trim ( iPad );
}
void ScheduleMallocTrim ()
{
static int iMallocTrimTask = TaskManager::RegisterGlobal ( "malloc_trim(0) periodically", 1 );
static int64_t iLastMallocTrimTimestamp = sphMicroTimer();
TaskManager::ScheduleJob ( iMallocTrimTask, iLastMallocTrimTimestamp + DEFAULT_MALLOC_TRIM_PERIOD, []
{
PerformMallocTrim ( 0 );
iLastMallocTrimTimestamp = sphMicroTimer();
ScheduleMallocTrim ();
} );
}
#else
int PerformMallocTrim ( size_t ) { return 0; }
void ScheduleMallocTrim () {}
#endif
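// Illustrative usage note (not part of the original source): PerformMallocTrim()
// forwards to malloc_trim(3), which returns 1 if memory was actually released
// back to the system and 0 otherwise; the stub above always returns 0 when
// malloc_trim is unavailable, so callers can treat both builds uniformly.
#if 0
void OnLowMemoryExample() // hypothetical caller
{
	if ( !PerformMallocTrim ( 0 ) )
		sphWarning ( "malloc_trim released no memory" );
}
#endif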
| 1,137 | C++ | .cpp | 34 | 31.852941 | 102 | 0.759817 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,915 | knnmisc.cpp | manticoresoftware_manticoresearch/src/knnmisc.cpp |
//
// Copyright (c) 2023-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "knnmisc.h"
#include "knnlib.h"
#include "exprtraits.h"
#include "sphinxint.h"
#include "fileio.h"
#include "sphinxjson.h"
static void NormalizeVec ( VecTraits_T<float> & dData )
{
float fNorm = 0.0f;
for ( auto i : dData )
fNorm += i*i;
fNorm = 1.0f / ( sqrtf(fNorm) + 1e-30f );
for ( auto & i : dData )
i *= fNorm;
}
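// Worked example (illustrative, not part of the original source): NormalizeVec()
// scales a vector to unit length, e.g. {3,4} has norm 5 and becomes {0.6,0.8}.
// The 1e-30f term only guards against division by zero for an all-zero vector,
// which simply stays near zero.
#if 0
float dVals[2] = { 3.0f, 4.0f };
VecTraits_T<float> dVec ( dVals, 2 );
NormalizeVec ( dVec ); // dVals is now approximately { 0.6f, 0.8f }
#endif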
class Expr_KNNDist_c : public ISphExpr
{
public:
Expr_KNNDist_c ( const CSphVector<float> & dAnchor, const CSphColumnInfo & tAttr );
float Eval ( const CSphMatch & tMatch ) const override;
int IntEval ( const CSphMatch & tMatch ) const override { return (int)Eval(tMatch); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const override { return (int64_t)Eval(tMatch); }
void FixupLocator ( const ISphSchema * pOldSchema, const ISphSchema * pNewSchema ) override { sphFixupLocator ( m_tAttr.m_tLocator, pOldSchema, pNewSchema ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) override;
ISphExpr * Clone() const override { return new Expr_KNNDist_c ( m_dAnchor, m_tAttr ); }
void Command ( ESphExprCommand eCmd, void * pArg ) final;
void SetData ( const util::Span_T<const knn::DocDist_t> & dData );
private:
std::unique_ptr<knn::Distance_i> m_pDistCalc;
CSphVector<float> m_dAnchor;
CSphColumnInfo m_tAttr;
const BYTE * m_pBlobPool = nullptr;
std::unique_ptr<columnar::Iterator_i> m_pIterator;
util::Span_T<const knn::DocDist_t> m_dData;
mutable CSphVector<float> m_dTmp;
mutable const knn::DocDist_t * m_pStart = nullptr;
};
Expr_KNNDist_c::Expr_KNNDist_c ( const CSphVector<float> & dAnchor, const CSphColumnInfo & tAttr )
: m_pDistCalc ( CreateKNNDistanceCalc ( tAttr.m_tKNN ) )
, m_dAnchor ( dAnchor )
, m_tAttr ( tAttr )
{
if ( tAttr.m_tKNN.m_eHNSWSimilarity==knn::HNSWSimilarity_e::COSINE )
NormalizeVec(m_dAnchor);
}
float Expr_KNNDist_c::Eval ( const CSphMatch & tMatch ) const
{
if ( m_pStart ) // use precalculated data
{
const knn::DocDist_t * pEnd = m_dData.end();
const knn::DocDist_t * pPtr = std::lower_bound ( m_pStart, pEnd, tMatch.m_tRowID, []( auto & tEntry, RowID_t tValue ){ return tEntry.m_tRowID < tValue; } );
assert ( pPtr!=pEnd && pPtr->m_tRowID==tMatch.m_tRowID );
m_pStart = pPtr;
return m_pStart->m_fDist;
}
else // calculate distance
{
		// this code path is used when no iterator is available, i.e. in a RAM chunk,
		// so performance is not critical here
ByteBlob_t tRes;
if ( m_tAttr.IsColumnar() )
tRes.second = m_pIterator->Get ( tMatch.m_tRowID, tRes.first );
else
tRes = tMatch.FetchAttrData ( m_tAttr.m_tLocator, m_pBlobPool );
VecTraits_T<float> dData ( (float*)tRes.first, tRes.second / sizeof(float) );
if ( m_tAttr.m_tKNN.m_eHNSWSimilarity==knn::HNSWSimilarity_e::COSINE )
{
m_dTmp.Resize ( dData.GetLength() );
memcpy ( m_dTmp.Begin(), dData.Begin(), dData.GetLengthBytes() );
NormalizeVec(m_dTmp);
dData = m_dTmp;
}
if ( dData.GetLength()!=m_tAttr.m_tKNN.m_iDims )
return FLT_MAX;
return m_pDistCalc->CalcDist ( { dData.Begin(), (size_t)dData.GetLength() }, { m_dAnchor.Begin(), (size_t)m_dAnchor.GetLength() } );
}
}
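// Note on the precalculated branch above (added commentary): matches normally
// arrive in ascending rowid order, so caching the last lower_bound() result in
// m_pStart narrows every subsequent search to the remaining tail of m_dData
// instead of restarting the binary search from the beginning.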
void Expr_KNNDist_c::Command ( ESphExprCommand eCmd, void * pArg )
{
switch ( eCmd )
{
case SPH_EXPR_SET_COLUMNAR:
if ( m_tAttr.IsColumnar() )
{
auto pColumnar = (const columnar::Columnar_i*)pArg;
if ( pColumnar )
{
std::string sError; // FIXME! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_tAttr.m_sName.cstr(), sError );
}
else
m_pIterator.reset();
}
break;
case SPH_EXPR_SET_BLOB_POOL:
m_pBlobPool = (const BYTE*)pArg;
// reset our temporary data (e.g. between index chunks)
m_pStart = nullptr;
break;
default:
break;
}
}
void Expr_KNNDist_c::SetData ( const util::Span_T<const knn::DocDist_t> & dData )
{
m_dData = dData;
m_pStart = m_dData.data();
}
uint64_t Expr_KNNDist_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_KNNDist_c");
return CALC_DEP_HASHES();
}
/////////////////////////////////////////////////////////////////////
const char * GetKnnDistAttrName()
{
static const char * szName = "@knn_dist";
return szName;
}
ISphExpr * CreateExpr_KNNDist ( const CSphVector<float> & dAnchor, const CSphColumnInfo & tAttr )
{
return new Expr_KNNDist_c ( dAnchor, tAttr );
}
static const char * HNSWSimilarity2Str ( knn::HNSWSimilarity_e eSim )
{
switch ( eSim )
{
case knn::HNSWSimilarity_e::L2: return "L2";
case knn::HNSWSimilarity_e::IP: return "IP";
case knn::HNSWSimilarity_e::COSINE: return "COSINE";
default: return nullptr;
}
}
static knn::HNSWSimilarity_e Str2HNSWSimilarity ( const CSphString & sSimilarity )
{
CSphString sSim = sSimilarity;
sSim.ToUpper();
if ( sSim=="L2" ) return knn::HNSWSimilarity_e::L2;
if ( sSim=="IP" ) return knn::HNSWSimilarity_e::IP;
if ( sSim=="COSINE" ) return knn::HNSWSimilarity_e::COSINE;
assert ( 0 && "Unknown similarity");
return knn::HNSWSimilarity_e::L2;
}
void AddKNNSettings ( StringBuilder_c & sRes, const CSphColumnInfo & tAttr )
{
if ( !tAttr.IsIndexedKNN() )
return;
const auto & tKNN = tAttr.m_tKNN;
sRes << " knn_type='hnsw'";
sRes << " knn_dims='" << tKNN.m_iDims << "'";
sRes << " hnsw_similarity='" << HNSWSimilarity2Str ( tKNN.m_eHNSWSimilarity ) << "'";
knn::IndexSettings_t tDefault;
if ( tKNN.m_iHNSWM!=tDefault.m_iHNSWM )
sRes << " hnsw_m='" << tKNN.m_iHNSWM << "'";
if ( tKNN.m_iHNSWEFConstruction!=tDefault.m_iHNSWEFConstruction )
sRes << " hnsw_ef_construction='" << tKNN.m_iHNSWEFConstruction << "'";
}
knn::IndexSettings_t ReadKNNJson ( bson::Bson_c tRoot )
{
knn::IndexSettings_t tRes;
tRes.m_iDims = (int) bson::Int ( tRoot.ChildByName ( "knn_dims" ) );
tRes.m_eHNSWSimilarity = Str2HNSWSimilarity ( bson::String ( tRoot.ChildByName ( "hnsw_similarity" ) ) );
tRes.m_iHNSWM = (int) bson::Int ( tRoot.ChildByName ( "hnsw_m" ), tRes.m_iHNSWM );
tRes.m_iHNSWEFConstruction = (int) bson::Int ( tRoot.ChildByName ( "hnsw_ef_construction" ), tRes.m_iHNSWEFConstruction );
return tRes;
}
void operator << ( JsonEscapedBuilder & tOut, const knn::IndexSettings_t & tSettings )
{
auto _ = tOut.Object();
knn::IndexSettings_t tDefault;
tOut.NamedString ( "knn_type", "hnsw" );
tOut.NamedVal ( "knn_dims", tSettings.m_iDims );
tOut.NamedString ( "hnsw_similarity", HNSWSimilarity2Str ( tSettings.m_eHNSWSimilarity ) );
tOut.NamedValNonDefault ( "hnsw_m", tSettings.m_iHNSWM, tDefault.m_iHNSWM );
tOut.NamedValNonDefault ( "hnsw_ef_construction", tSettings.m_iHNSWEFConstruction, tDefault.m_iHNSWEFConstruction );
}
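// For reference (illustrative output, assuming hnsw_m and hnsw_ef_construction
// are left at their defaults and therefore skipped by NamedValNonDefault):
// {"knn_type":"hnsw","knn_dims":4,"hnsw_similarity":"COSINE"}
// ReadKNNJson() above consumes this shape back into knn::IndexSettings_t.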
CSphString FormatKNNConfigStr ( const CSphVector<NamedKNNSettings_t> & dAttrs )
{
JsonObj_c tRoot;
JsonObj_c tArray(true);
for ( auto & i : dAttrs )
{
JsonObj_c tObj;
tObj.AddStr ( "name", i.m_sName );
tObj.AddStr ( "type", "hnsw" );
tObj.AddInt ( "dims", i.m_iDims );
tObj.AddStr ( "hnsw_similarity", HNSWSimilarity2Str ( i.m_eHNSWSimilarity ) );
tObj.AddInt ( "hnsw_m", i.m_iHNSWM );
tObj.AddInt ( "hnsw_ef_construction", i.m_iHNSWEFConstruction );
tArray.AddItem(tObj);
}
tRoot.AddItem ( "attrs", tArray );
return tRoot.AsString(false);
}
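// For reference (illustrative output for a single attribute; the values shown
// are just that attribute's settings):
// {"attrs":[{"name":"vec","type":"hnsw","dims":4,"hnsw_similarity":"L2","hnsw_m":16,"hnsw_ef_construction":200}]}
// Unlike the JsonEscapedBuilder serializer above, this writer always emits
// hnsw_m and hnsw_ef_construction, even when they match the defaults.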
bool ParseKNNConfigStr ( const CSphString & sStr, CSphVector<NamedKNNSettings_t> & dParsed, CSphString & sError )
{
JsonObj_c tRoot ( sStr.cstr() );
const int MAX_ERROR_LEN = 256;
char szError[MAX_ERROR_LEN];
if ( tRoot.GetError ( szError, MAX_ERROR_LEN, sError ) )
return false;
JsonObj_c tAttrs = tRoot.GetArrayItem ( "attrs", sError );
if ( !tAttrs )
return false;
for ( const auto & i : tAttrs )
{
auto & tParsed = dParsed.Add();
if ( !i.FetchStrItem ( tParsed.m_sName, "name", sError ) )
return false;
CSphString sType;
if ( !i.FetchStrItem ( sType, "type", sError ) )
return false;
sType.ToUpper();
if ( sType!="HNSW" )
{
sError.SetSprintf ( "Unknown knn type '%s'", sType.cstr() );
return false;
}
CSphString sSimilarity;
if ( !i.FetchIntItem ( tParsed.m_iDims, "dims", sError ) ) return false;
if ( !i.FetchIntItem ( tParsed.m_iHNSWM, "hnsw_m", sError, true ) ) return false;
if ( !i.FetchIntItem ( tParsed.m_iHNSWEFConstruction, "hnsw_ef_construction", sError, true ) ) return false;
if ( !i.FetchStrItem ( sSimilarity, "hnsw_similarity", sError) ) return false;
sSimilarity.ToUpper();
if ( sSimilarity!="L2" && sSimilarity!="IP" && sSimilarity!="COSINE" )
{
sError.SetSprintf ( "Unknown knn similarity '%s'", sSimilarity.cstr() );
return false;
}
tParsed.m_eHNSWSimilarity = Str2HNSWSimilarity ( sSimilarity.cstr() );
}
return true;
}
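// Round-trip sketch (illustrative, not part of the original source):
// FormatKNNConfigStr() output is accepted by ParseKNNConfigStr(), which makes
// the pair convenient to sanity-check.
#if 0
CSphVector<NamedKNNSettings_t> dIn, dOut;
NamedKNNSettings_t & tAttr = dIn.Add();
tAttr.m_sName = "vec";
tAttr.m_iDims = 4;
tAttr.m_eHNSWSimilarity = knn::HNSWSimilarity_e::L2;
CSphString sError;
bool bOk = ParseKNNConfigStr ( FormatKNNConfigStr(dIn), dOut, sError );
assert ( bOk && dOut.GetLength()==1 );
#endif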
std::unique_ptr<knn::Builder_i> BuildCreateKNN ( const ISphSchema & tSchema, int64_t iNumElements, CSphVector<PlainOrColumnar_t> & dAttrs, CSphString & sError )
{
std::unique_ptr<knn::Builder_i> pBuilder = CreateKNNBuilder ( tSchema, iNumElements, sError );
if ( !pBuilder )
return pBuilder;
int iColumnar = 0;
for ( int i = 0; i < tSchema.GetAttrsCount(); i++ )
{
const CSphColumnInfo & tAttr = tSchema.GetAttr(i);
if ( tAttr.IsIndexedKNN() )
dAttrs.Add ( PlainOrColumnar_t ( tAttr, iColumnar ) );
if ( tAttr.IsColumnar() )
iColumnar++;
}
return pBuilder;
}
bool BuildStoreKNN ( RowID_t tRowID, const CSphRowitem * pRow, const BYTE * pPool, CSphVector<ScopedTypedIterator_t> & dIterators, const CSphVector<PlainOrColumnar_t> & dAttrs, knn::Builder_i & tBuilder )
{
int iKNNAttrIndex = 0;
for ( auto & i : dAttrs )
{
assert ( i.m_eType==SPH_ATTR_FLOAT_VECTOR );
const BYTE * pSrc = nullptr;
int iBytes = i.Get ( tRowID, pRow, pPool, dIterators, pSrc );
int iValues = iBytes / sizeof(float);
if ( !tBuilder.SetAttr ( iKNNAttrIndex, { (float*)pSrc, (size_t)iValues } ) )
return false;
iKNNAttrIndex++;
}
return true;
}
std::pair<RowidIterator_i *, bool> CreateKNNIterator ( knn::KNN_i * pKNN, const CSphQuery & tQuery, const ISphSchema & tIndexSchema, const ISphSchema & tSorterSchema, CSphString & sError )
{
if ( tQuery.m_sKNNAttr.IsEmpty() )
return { nullptr, false };
auto pKNNAttr = tIndexSchema.GetAttr ( tQuery.m_sKNNAttr.cstr() );
if ( !pKNNAttr )
{
sError.SetSprintf ( "KNN search attribute '%s' not found", tQuery.m_sKNNAttr.cstr() );
return { nullptr, true };
}
if ( !pKNNAttr->IsIndexedKNN() )
{
sError.SetSprintf ( "KNN search attribute '%s' does not have KNN index", tQuery.m_sKNNAttr.cstr() );
return { nullptr, true };
}
if ( !pKNN )
{
sError = "KNN index not loaded" ;
return { nullptr, true };
}
const auto pAttr = tSorterSchema.GetAttr ( GetKnnDistAttrName() );
assert(pAttr);
ISphExpr * pExpr = pAttr->m_pExpr;
assert(pExpr);
auto pKnnDist = (Expr_KNNDist_c*)pExpr;
CSphVector<float> dPoint ( tQuery.m_dKNNVec );
if ( pKNNAttr->m_tKNN.m_eHNSWSimilarity == knn::HNSWSimilarity_e::COSINE )
NormalizeVec(dPoint);
std::string sErrorSTL;
knn::Iterator_i * pIterator = pKNN->CreateIterator ( pKNNAttr->m_sName.cstr(), { dPoint.Begin(), (size_t)dPoint.GetLength() }, tQuery.m_iKNNK, tQuery.m_iKnnEf, sErrorSTL );
if ( !pIterator )
{
sError = sErrorSTL.c_str();
return { nullptr, true };
}
pKnnDist->SetData ( pIterator->GetData() );
return { CreateIteratorWrapper ( pIterator, nullptr ), false };
}
RowIteratorsWithEstimates_t CreateKNNIterators ( knn::KNN_i * pKNN, const CSphQuery & tQuery, const ISphSchema & tIndexSchema, const ISphSchema & tSorterSchema, bool & bError, CSphString & sError )
{
RowIteratorsWithEstimates_t dIterators;
auto tRes = CreateKNNIterator ( pKNN, tQuery, tIndexSchema, tSorterSchema, sError );
if ( tRes.second )
{
bError = true;
return dIterators;
}
if ( !tRes.first )
return dIterators;
dIterators.Add ( { tRes.first, tQuery.m_iKNNK } );
return dIterators;
}
| 12,176 | C++ | .cpp | 325 | 34.990769 | 204 | 0.699107 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,916 | columnarexpr.cpp | manticoresoftware_manticoresearch/src/columnarexpr.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "columnarexpr.h"
#include "exprtraits.h"
#include "sphinxint.h"
#include "conversion.h"
template <typename T>
class Expr_Columnar_MVAIn_T : public Expr_ArgVsConstSet_T<int64_t>
{
using BASE = Expr_ArgVsConstSet_T<int64_t>;
public:
Expr_Columnar_MVAIn_T ( const CSphString & sName, ConstList_c * pConsts );
ByteBlob_t MvaEval ( const CSphMatch & ) const final { assert ( 0 && "not implemented" ); return {nullptr,0}; }
int IntEval ( const CSphMatch & tMatch ) const final;
void Command ( ESphExprCommand eCmd, void * pArg ) final;
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_Columnar_MVAIn_T ( *this ); }
private:
std::unique_ptr<columnar::Iterator_i> m_pIterator;
CSphString m_sName;
Expr_Columnar_MVAIn_T ( const Expr_Columnar_MVAIn_T & rhs );
};
template <typename T>
Expr_Columnar_MVAIn_T<T>::Expr_Columnar_MVAIn_T ( const CSphString & sName, ConstList_c * pConsts )
: BASE ( nullptr, pConsts, false )
, m_sName ( sName )
{
assert ( pConsts );
BASE::m_dValues.Sort();
}
template <typename T>
int Expr_Columnar_MVAIn_T<T>::IntEval ( const CSphMatch & tMatch ) const
{
const uint8_t * pData = nullptr;
int iLen = m_pIterator->Get ( tMatch.m_tRowID, pData );
return MvaEval_Any<T> ( { (T*)const_cast<uint8_t*>(pData), int64_t(iLen/sizeof(T)) }, m_dValues );
}
template <typename T>
void Expr_Columnar_MVAIn_T<T>::Command ( ESphExprCommand eCmd, void * pArg )
{
BASE::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_COLUMNAR )
{
auto pColumnar = (const columnar::Columnar_i*)pArg;
if ( pColumnar )
{
std::string sError; // FIXME! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError );
}
}
}
template <typename T>
uint64_t Expr_Columnar_MVAIn_T<T>::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_Columnar_MVAIn_T");
return CALC_DEP_HASHES_EX(m_uValueHash);
}
template <typename T>
Expr_Columnar_MVAIn_T<T>::Expr_Columnar_MVAIn_T ( const Expr_Columnar_MVAIn_T & rhs )
: BASE(rhs)
, m_sName ( rhs.m_sName )
{}
/////////////////////////////////////////////////////////////////////
class Expr_Columnar_StringIn_c : public Expr_ArgVsConstSet_T<int64_t>
{
using BASE = Expr_ArgVsConstSet_T<int64_t>;
public:
Expr_Columnar_StringIn_c ( const CSphString & sName, ConstList_c * pConsts, ESphCollation eCollation );
int IntEval ( const CSphMatch & tMatch ) const final;
void Command ( ESphExprCommand eCmd, void * pArg ) final;
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_Columnar_StringIn_c ( *this ); }
private:
std::unique_ptr<columnar::Iterator_i> m_pIterator;
CSphString m_sName;
StrHashCalc_fn m_fnHashCalc = nullptr;
CSphVector<uint64_t> m_dHashes;
ESphCollation m_eCollation = SPH_COLLATION_DEFAULT;
bool m_bHasHashes = false;
Expr_Columnar_StringIn_c ( const Expr_Columnar_StringIn_c & rhs );
uint64_t GetStringHash ( RowID_t tRowID ) const;
};
Expr_Columnar_StringIn_c::Expr_Columnar_StringIn_c ( const CSphString & sName, ConstList_c * pConsts, ESphCollation eCollation )
: BASE ( nullptr, pConsts, false )
, m_sName ( sName )
, m_fnHashCalc ( GetStringHashCalcFunc(eCollation) )
, m_eCollation ( eCollation )
{
assert(pConsts);
const char * szExpr = pConsts->m_sExpr.first;
int iExprLen = pConsts->m_sExpr.second;
for ( int64_t iVal : m_dValues )
{
int iOfs = GetConstStrOffset ( iVal );
int iLen = GetConstStrLength ( iVal );
if ( iOfs>0 && iOfs+iLen<=iExprLen )
{
auto tRes = SqlUnescapeN ( szExpr + iOfs, iLen );
m_dHashes.Add ( tRes.first.IsEmpty() ? 0 : m_fnHashCalc ( (const BYTE*)tRes.first.cstr(), tRes.second, SPH_FNV64_SEED ) );
}
}
}
int Expr_Columnar_StringIn_c::IntEval ( const CSphMatch & tMatch ) const
{
uint64_t uHash = GetStringHash ( tMatch.m_tRowID );
for ( auto i : m_dHashes )
if ( i==uHash )
return 1;
return 0;
}
void Expr_Columnar_StringIn_c::Command ( ESphExprCommand eCmd, void * pArg )
{
BASE::Command ( eCmd, pArg );
if ( eCmd==SPH_EXPR_SET_COLUMNAR )
{
auto pColumnar = (const columnar::Columnar_i*)pArg;
if ( pColumnar )
{
columnar::IteratorHints_t tHints;
columnar::IteratorCapabilities_t tCapabilities;
tHints.m_bNeedStringHashes = m_eCollation==SPH_COLLATION_DEFAULT;
std::string sError; // FIXME! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError, tHints, &tCapabilities );
assert ( m_pIterator );
m_bHasHashes = tCapabilities.m_bStringHashes;
}
}
}
uint64_t Expr_Columnar_StringIn_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_Columnar_StringIn_c");
CALC_STR_HASH ( m_sName, m_sName.Length() );
CALC_POD_HASH(m_fnHashCalc);
CALC_POD_HASHES(m_dHashes);
CALC_POD_HASH(m_eCollation);
CALC_POD_HASH(m_bHasHashes);
return CALC_PARENT_HASH_EX(m_uValueHash);
}
Expr_Columnar_StringIn_c::Expr_Columnar_StringIn_c ( const Expr_Columnar_StringIn_c & rhs )
	: BASE (rhs)
	, m_sName ( rhs.m_sName )
	, m_fnHashCalc ( rhs.m_fnHashCalc )
	, m_dHashes ( rhs.m_dHashes )
	, m_eCollation ( rhs.m_eCollation )
{}
uint64_t Expr_Columnar_StringIn_c::GetStringHash ( RowID_t tRowID ) const
{
if ( m_bHasHashes )
return m_pIterator->Get(tRowID);
const BYTE * pStr = nullptr;
int iLen = m_pIterator->Get ( tRowID, pStr );
if ( !iLen )
return 0;
return m_fnHashCalc ( pStr, iLen, SPH_FNV64_SEED );
}
/////////////////////////////////////////////////////////////////////
class Expr_Columnar_StringLength_c : public ISphExpr
{
public:
Expr_Columnar_StringLength_c ( const CSphString & sName ) : m_sName ( sName ) {}
int IntEval ( const CSphMatch & tMatch ) const override { return m_pIterator->GetLength( tMatch.m_tRowID ); }
void Command ( ESphExprCommand eCmd, void * pArg ) override;
float Eval ( const CSphMatch & tMatch ) const final { return (float)IntEval ( tMatch ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) override;
void FixupLocator ( const ISphSchema * /*pOldSchema*/, const ISphSchema * /*pNewSchema*/ ) final {}
ISphExpr * Clone() const override{ return new Expr_Columnar_StringLength_c(m_sName); }
protected:
CSphString m_sName;
std::unique_ptr<columnar::Iterator_i> m_pIterator;
};
void Expr_Columnar_StringLength_c::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( eCmd==SPH_EXPR_SET_COLUMNAR )
{
auto pColumnar = (const columnar::Columnar_i*)pArg;
if ( pColumnar )
{
std::string sError; // FIXME! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError );
}
}
}
uint64_t Expr_Columnar_StringLength_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_Columnar_StringLength_c");
CALC_STR_HASH ( m_sName, m_sName.Length() );
return CALC_DEP_HASHES();
}
/////////////////////////////////////////////////////////////////////
template<typename T>
class Expr_Columnar_MvaLength_T : public Expr_Columnar_StringLength_c
{
using Expr_Columnar_StringLength_c::Expr_Columnar_StringLength_c;
public:
int IntEval ( const CSphMatch & tMatch ) const final { return m_pIterator->GetLength ( tMatch.m_tRowID ) / sizeof(T); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_Columnar_MvaLength_T(m_sName); }
};
template<typename T>
uint64_t Expr_Columnar_MvaLength_T<T>::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_Columnar_MvaLength_T");
CALC_STR_HASH ( m_sName, m_sName.Length() );
return CALC_DEP_HASHES();
}
/////////////////////////////////////////////////////////////////////
// aggregate functions evaluator for columnar MVAs
template <typename T>
class Expr_ColumnarMVAAggr_T : public Expr_NoLocator_c
{
public:
Expr_ColumnarMVAAggr_T ( ISphExpr * pExpr, ESphAggrFunc eFunc );
int64_t Int64Eval ( const CSphMatch & tMatch ) const final;
float Eval ( const CSphMatch & tMatch ) const final { return (float)Int64Eval ( tMatch ); }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)Int64Eval ( tMatch ); }
void Command ( ESphExprCommand eCmd, void * pArg ) final;
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_ColumnarMVAAggr_T ( *this ); }
protected:
CSphRefcountedPtr<ISphExpr> m_pExpr;
ESphAggrFunc m_eFunc = SPH_AGGR_NONE;
private:
Expr_ColumnarMVAAggr_T ( const Expr_ColumnarMVAAggr_T & rhs );
};
template <typename T>
Expr_ColumnarMVAAggr_T<T>::Expr_ColumnarMVAAggr_T ( ISphExpr * pExpr, ESphAggrFunc eFunc )
: m_pExpr ( pExpr )
, m_eFunc ( eFunc )
{
SafeAddRef(m_pExpr);
}
template <typename T>
int64_t Expr_ColumnarMVAAggr_T<T>::Int64Eval ( const CSphMatch & tMatch ) const
{
if ( !m_pExpr )
return 0;
ByteBlob_t tMva = m_pExpr->MvaEval(tMatch);
if ( !tMva.second )
return 0;
int nValues = tMva.second / sizeof(T);
const T * L = (const T *)tMva.first;
const T * R = L+nValues-1;
switch ( m_eFunc )
{
case SPH_AGGR_MIN: return *L;
case SPH_AGGR_MAX: return *R;
default: return 0;
}
}
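// Design note (added commentary): returning the first element for SPH_AGGR_MIN
// and the last for SPH_AGGR_MAX relies on the invariant that MVA values are
// stored sorted in ascending order, so no scan over the blob is needed.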
template <typename T>
void Expr_ColumnarMVAAggr_T<T>::Command ( ESphExprCommand eCmd, void * pArg )
{
if ( m_pExpr )
m_pExpr->Command ( eCmd, pArg );
}
template <typename T>
uint64_t Expr_ColumnarMVAAggr_T<T>::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_ColumnarMVAAggr_T");
CALC_POD_HASH(m_eFunc);
CALC_CHILD_HASH(m_pExpr);
return CALC_DEP_HASHES();
}
template <typename T>
Expr_ColumnarMVAAggr_T<T>::Expr_ColumnarMVAAggr_T ( const Expr_ColumnarMVAAggr_T & rhs )
: m_pExpr ( SafeClone ( rhs.m_pExpr ) )
, m_eFunc ( rhs.m_eFunc )
{}
/////////////////////////////////////////////////////////////////////
class Expr_GetColumnar_Traits_c : public ISphExpr
{
public:
Expr_GetColumnar_Traits_c ( const CSphString & sName, bool bStored );
Expr_GetColumnar_Traits_c ( const Expr_GetColumnar_Traits_c & rhs );
void FixupLocator ( const ISphSchema * /*pOldSchema*/, const ISphSchema * /*pNewSchema*/ ) final {}
void Command ( ESphExprCommand eCmd, void * pArg ) final;
bool IsColumnar ( bool * pStored ) const final;
protected:
CSphString m_sName;
bool m_bStored = false;
std::unique_ptr<columnar::Iterator_i> m_pIterator;
uint64_t CalcHash ( const char * szTag, const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME_NOCHECK(szTag);
CALC_STR_HASH(m_sName, m_sName.Length());
CALC_POD_HASH(m_bStored);
return CALC_DEP_HASHES();
}
};
Expr_GetColumnar_Traits_c::Expr_GetColumnar_Traits_c ( const CSphString & sName, bool bStored )
: m_sName ( sName )
, m_bStored ( bStored )
{}
Expr_GetColumnar_Traits_c::Expr_GetColumnar_Traits_c ( const Expr_GetColumnar_Traits_c & rhs )
: m_sName ( rhs.m_sName )
, m_bStored ( rhs.m_bStored )
{}
bool Expr_GetColumnar_Traits_c::IsColumnar ( bool * pStored ) const
{
if ( pStored )
*pStored = m_bStored;
return true;
}
void Expr_GetColumnar_Traits_c::Command ( ESphExprCommand eCmd, void * pArg )
{
switch ( eCmd )
{
case SPH_EXPR_SET_COLUMNAR:
{
auto pColumnar = (const columnar::Columnar_i*)pArg;
if ( pColumnar )
{
std::string sError; // FIXME! report errors
m_pIterator = CreateColumnarIterator ( pColumnar, m_sName.cstr(), sError );
}
else
m_pIterator.reset();
}
break;
case SPH_EXPR_GET_COLUMNAR_COL:
*(CSphString*)pArg = m_sName;
break;
default:
break;
}
}
/////////////////////////////////////////////////////////////////////
class Expr_GetColumnarInt_c : public Expr_GetColumnar_Traits_c
{
using Expr_GetColumnar_Traits_c::Expr_GetColumnar_Traits_c;
public:
float Eval ( const CSphMatch & tMatch ) const override { return (float)FetchValue(tMatch); }
int IntEval ( const CSphMatch & tMatch ) const override { return (int)FetchValue(tMatch); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const override { return FetchValue(tMatch); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const override { return new Expr_GetColumnarInt_c ( m_sName, m_bStored ); }
protected:
inline SphAttr_t FetchValue ( const CSphMatch & tMatch ) const { return m_pIterator->Get ( tMatch.m_tRowID ); }
};
uint64_t Expr_GetColumnarInt_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_GetColumnarInt_c");
return CALC_PARENT_HASH();
}
/////////////////////////////////////////////////////////////////////
class Expr_GetColumnarFloat_c : public Expr_GetColumnarInt_c
{
using Expr_GetColumnarInt_c::Expr_GetColumnarInt_c;
public:
float Eval ( const CSphMatch & tMatch ) const final { return sphDW2F ( (DWORD)FetchValue(tMatch) ); }
int IntEval ( const CSphMatch & tMatch ) const final { return (int)sphDW2F ( (DWORD)FetchValue(tMatch) ); }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)sphDW2F ( (DWORD)FetchValue(tMatch) ); }
ISphExpr * Clone() const final { return new Expr_GetColumnarFloat_c ( m_sName, m_bStored ); }
};
/////////////////////////////////////////////////////////////////////
class Expr_GetColumnarString_c : public Expr_GetColumnar_Traits_c
{
using Expr_GetColumnar_Traits_c::Expr_GetColumnar_Traits_c;
public:
float Eval ( const CSphMatch & ) const final { assert ( 0 ); return 0; }
int StringEval ( const CSphMatch & tMatch, const BYTE ** ppStr ) const final { return m_pIterator->Get ( tMatch.m_tRowID, *ppStr ); }
const BYTE * StringEvalPacked ( const CSphMatch & tMatch ) const final { return m_pIterator->GetPacked ( tMatch.m_tRowID ); }
int StringLenEval ( const CSphMatch & tMatch ) const final { return m_pIterator->GetLength ( tMatch.m_tRowID ); }
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_GetColumnarString_c ( m_sName, m_bStored ); }
};
uint64_t Expr_GetColumnarString_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_GetColumnarString_c");
return CALC_PARENT_HASH();
}
//////////////////////////////////////////////////////////////////////////
class Expr_GetColumnarMva_c : public Expr_GetColumnar_Traits_c
{
using Expr_GetColumnar_Traits_c::Expr_GetColumnar_Traits_c;
public:
float Eval ( const CSphMatch & ) const final { assert ( 0 ); return 0; }
int64_t Int64Eval ( const CSphMatch & tMatch ) const final { return (int64_t)m_pIterator->GetPacked ( tMatch.m_tRowID ); }
ByteBlob_t MvaEval ( const CSphMatch & tMatch ) const final;
uint64_t GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable ) final;
ISphExpr * Clone() const final { return new Expr_GetColumnarMva_c ( m_sName, m_bStored ); }
};
ByteBlob_t Expr_GetColumnarMva_c::MvaEval ( const CSphMatch & tMatch ) const
{
const BYTE * pResult = nullptr;
int iBytes = m_pIterator->Get ( tMatch.m_tRowID, pResult );
return {pResult, iBytes};
}
uint64_t Expr_GetColumnarMva_c::GetHash ( const ISphSchema & tSorterSchema, uint64_t uPrevHash, bool & bDisable )
{
EXPR_CLASS_NAME("Expr_GetColumnarMva_c");
return CALC_PARENT_HASH();
}
/////////////////////////////////////////////////////////////////////
ISphExpr * CreateExpr_ColumnarMva32In ( const CSphString & sName, ConstList_c * pConsts ) { return new Expr_Columnar_MVAIn_T<DWORD> ( sName, pConsts ); }
ISphExpr * CreateExpr_ColumnarMva64In ( const CSphString & sName, ConstList_c * pConsts ) { return new Expr_Columnar_MVAIn_T<int64_t> ( sName, pConsts ); }
ISphExpr * CreateExpr_ColumnarStringIn ( const CSphString & sName, ConstList_c * pConsts, ESphCollation eCollation ) { return new Expr_Columnar_StringIn_c ( sName, pConsts, eCollation ); }
ISphExpr * CreateExpr_ColumnarStringLength ( const CSphString & sName ) { return new Expr_Columnar_StringLength_c(sName); }
ISphExpr * CreateExpr_ColumnarMva32Length ( const CSphString & sName ) { return new Expr_Columnar_MvaLength_T<DWORD>(sName); }
ISphExpr * CreateExpr_ColumnarMva64Length ( const CSphString & sName ) { return new Expr_Columnar_MvaLength_T<uint64_t>(sName); }
ISphExpr * CreateExpr_ColumnarMva32Aggr ( ISphExpr * pExpr, ESphAggrFunc eFunc ) { return new Expr_ColumnarMVAAggr_T<DWORD> ( pExpr, eFunc ); }
ISphExpr * CreateExpr_ColumnarMva64Aggr ( ISphExpr * pExpr, ESphAggrFunc eFunc ) { return new Expr_ColumnarMVAAggr_T<int64_t> ( pExpr, eFunc ); }
ISphExpr * CreateExpr_GetColumnarInt ( const CSphString & sName, bool bStored ) { return new Expr_GetColumnarInt_c ( sName, bStored ); }
ISphExpr * CreateExpr_GetColumnarFloat ( const CSphString & sName, bool bStored ) { return new Expr_GetColumnarFloat_c ( sName, bStored ); }
ISphExpr * CreateExpr_GetColumnarString ( const CSphString & sName, bool bStored ) { return new Expr_GetColumnarString_c ( sName, bStored ); }
ISphExpr * CreateExpr_GetColumnarMva ( const CSphString & sName, bool bStored ) { return new Expr_GetColumnarMva_c ( sName, bStored ); }
| 17,846 | C++ | .cpp | 413 | 41.142857 | 188 | 0.701968 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,917 | digest_sha1.cpp | manticoresoftware_manticoresearch/src/digest_sha1.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "digest_sha1.h"
#include "sphinxstd.h"
#include "fileio.h"
#include "fileutils.h"
#include "indexsettings.h"
#if WITH_SSL
#define USE_SHA1_FROM_OPENSSL
#include <openssl/evp.h>
#endif
//////////////////////////////////////////////////////////////////////////
// SHA1 digests
//////////////////////////////////////////////////////////////////////////
// SHA1 from https://github.com/shodanium/nanomysql/blob/master/nanomysql.cpp
// nanomysql, a tiny MySQL client
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
static constexpr int SHA1_SIZE = HASH20_SIZE;
class SHA1_c::Impl_c
{
public:
#ifdef USE_SHA1_FROM_OPENSSL
void Init()
{
m_dCtx = EVP_MD_CTX_create();
EVP_DigestInit_ex ( m_dCtx, EVP_sha1(), nullptr );
}
void Update ( const BYTE* data, int len )
{
EVP_DigestUpdate ( m_dCtx, data, len );
}
void Final ( HASH20_t& tDigest )
{
unsigned int uLen = SHA1_SIZE;
EVP_DigestFinal_ex ( m_dCtx, tDigest.data(), &uLen );
EVP_MD_CTX_destroy ( m_dCtx );
}
private:
EVP_MD_CTX* m_dCtx;
#else
void Init()
{
state = { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 };
count.fill ( 0 );
}
void Update ( const BYTE* data, int len )
{
int i, j = ( count[0] >> 3 ) & 63;
count[0] += ( len << 3 );
if ( count[0] < (DWORD)( len << 3 ) )
count[1]++;
count[1] += ( len >> 29 );
if ( ( j + len ) > 63 )
{
i = 64 - j;
memcpy ( &buffer[j], data, i );
Transform ( buffer.data() );
for ( ; i + 63 < len; i += 64 )
Transform ( data + i );
j = 0;
} else
i = 0;
memcpy ( &buffer[j], &data[i], len - i );
}
void Final ( HASH20_t& tDigest )
{
std::array<BYTE,8> finalcount;
for ( auto i = 0; i < 8; ++i )
finalcount[i] = (BYTE)( ( count[( i >= 4 ) ? 0 : 1] >> ( ( 3 - ( i & 3 ) ) * 8 ) )
& 255 ); // endian independent
Update ( (const BYTE*)"\200", 1 ); // add padding
while ( ( count[0] & 504 ) != 448 )
Update ( (const BYTE*)"\0", 1 );
Update ( finalcount.data(), 8 ); // should cause a SHA1_Transform()
for ( auto i = 0; i < SHA1_SIZE; ++i )
tDigest[i] = (BYTE)( ( state[i >> 2] >> ( ( 3 - ( i & 3 ) ) * 8 ) ) & 255 );
}
private:
static constexpr int SHA1_BUF_SIZE = 64;
std::array<DWORD,5> state;
std::array<DWORD,2> count;
std::array<BYTE, SHA1_BUF_SIZE> buffer;
void Transform ( const BYTE buf[SHA1_BUF_SIZE] )
{
DWORD a = state[0], b = state[1], c = state[2], d = state[3], e = state[4], block[16];
memset ( block, 0, sizeof ( block ) ); // initial conversion to big-endian units
for ( int i = 0; i < 64; i++ )
block[i >> 2] += buf[i] << ( ( 3 - ( i & 3 ) ) * 8 );
for ( int i = 0; i < 80; i++ ) // do hashing rounds
{
#define LROT( value, bits ) ( ( ( value ) << ( bits ) ) | ( ( value ) >> ( 32 - ( bits ) ) ) )
if ( i >= 16 )
block[i & 15] = LROT (
block[( i + 13 ) & 15] ^ block[( i + 8 ) & 15] ^ block[( i + 2 ) & 15] ^ block[i & 15], 1 );
if ( i < 20 )
e += ( ( b & ( c ^ d ) ) ^ d ) + 0x5A827999;
else if ( i < 40 )
e += ( b ^ c ^ d ) + 0x6ED9EBA1;
else if ( i < 60 )
e += ( ( ( b | c ) & d ) | ( b & c ) ) + 0x8F1BBCDC;
else
e += ( b ^ c ^ d ) + 0xCA62C1D6;
e += block[i & 15] + LROT ( a, 5 );
DWORD t = e;
e = d;
d = c;
c = LROT ( b, 30 );
b = a;
a = t;
}
state[0] += a; // save state
state[1] += b;
state[2] += c;
state[3] += d;
state[4] += e;
}
#endif
public:
HASH20_t FinalHash()
{
HASH20_t dDigest {};
Final ( dDigest );
return dDigest;
}
};
SHA1_c::SHA1_c()
: m_pImpl { new Impl_c }
{}
SHA1_c::~SHA1_c() = default;
void SHA1_c::Init()
{
m_pImpl->Init();
}
void SHA1_c::Update ( const BYTE* pData, int iLen )
{
m_pImpl->Update ( pData, iLen );
}
void SHA1_c::Final ( HASH20_t& tDigest )
{
m_pImpl->Final ( tDigest );
}
HASH20_t SHA1_c::FinalHash()
{
return m_pImpl->FinalHash ();
}
CSphString BinToHex ( const BYTE * pHash, int iLen )
{
static constexpr std::array<char,16> sDigits { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
if ( !iLen )
return "";
CSphString sRes;
int iStrLen = 2*iLen+2;
sRes.Reserve ( iStrLen );
auto * sHash = const_cast<char *> (sRes.cstr ());
for ( int i=0; i<iLen; ++i )
{
*sHash++ = sDigits[pHash[i] >> 4];
*sHash++ = sDigits[pHash[i] & 0x0f];
}
*sHash = '\0';
return sRes;
}
CSphString BinToHex ( const VecTraits_T<BYTE> & dHash )
{
return BinToHex ( dHash.Begin(), dHash.GetLength() );
}
CSphString BinToHex ( const std::array<BYTE, HASH20_SIZE>& dHash )
{
return BinToHex ( dHash.data(), HASH20_SIZE );
}
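// Usage sketch (illustrative, not part of the original source):
#if 0
const BYTE dRaw[3] = { 0xde, 0xad, 0x01 };
CSphString sHex = BinToHex ( dRaw, 3 ); // yields "dead01"
#endif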
CSphString CalcSHA1 ( const void * pData, int iLen )
{
SHA1_c dHasher;
dHasher.Init();
dHasher.Update ( (const BYTE*)pData, iLen );
auto dHashValue = dHasher.FinalHash();
return BinToHex ( dHashValue );
}
bool CalcSHA1 ( const CSphString & sFileName, CSphString & sRes, CSphString & sError )
{
CSphAutofile tFile ( sFileName, SPH_O_READ, sError, false );
if ( tFile.GetFD()<0 )
return false;
SHA1_c dHasher;
dHasher.Init();
const int64_t iFileSize = tFile.GetSize();
const int iBufSize = (int)Min ( iFileSize, DEFAULT_READ_BUFFER );
int64_t iOff = 0;
CSphFixedVector<BYTE> dFileData ( iBufSize );
while ( iOff<iFileSize )
{
const int iLen = (int)Min ( iBufSize, iFileSize - iOff );
if ( !tFile.Read ( dFileData.Begin(), iLen, sError ) )
return false;
dHasher.Update ( dFileData.Begin(), iLen );
iOff += iLen;
}
auto dHashValue = dHasher.FinalHash();
sRes = BinToHex ( dHashValue );
return true;
}
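// Usage sketch (illustrative; the digest below is the standard SHA1 test vector):
#if 0
CSphString sDigest = CalcSHA1 ( "abc", 3 );
// sDigest=="a9993e364706816aba3e25717850c26c9cd0d89d"
#endif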
//////////////////////////////////////////////////////////////////////////
// WriterWithHash_c - CSphWriter which also calc SHA1 on-the-fly
//////////////////////////////////////////////////////////////////////////
WriterWithHash_c::WriterWithHash_c()
: m_pHasher { std::make_unique<SHA1_c>() }
{
m_pHasher->Init();
}
void WriterWithHash_c::Flush()
{
assert ( !m_bHashDone ); // can't do anything with already finished hash
if ( m_iPoolUsed>0 )
{
m_pHasher->Update ( m_pBuffer.get(), m_iPoolUsed );
CSphWriter::Flush();
}
}
void WriterWithHash_c::CloseFile ()
{
assert ( !m_bHashDone );
CSphWriter::CloseFile ();
m_pHasher->Final ( m_dHashValue );
m_bHashDone = true;
}
//////////////////////////////////////////////////////////////////////////
// TaggedHash20_t - string tag (filename) with 20-bytes binary hash
//////////////////////////////////////////////////////////////////////////
//const std::array<BYTE, HASH20_SIZE> TaggedHash20_t::m_dZeroHash {};
// by tag + hash
TaggedHash20_t::TaggedHash20_t ( const char* sTagName, const BYTE* pHashValue )
: m_sTagName ( sTagName )
{
if ( pHashValue )
memcpy ( m_dHashValue.data(), pHashValue, HASH20_SIZE );
}
TaggedHash20_t::TaggedHash20_t ( const char* sTag, const HASH20_t& dHashValue )
: m_sTagName ( sTag )
, m_dHashValue { dHashValue }
{}
// serialize to FIPS form
CSphString TaggedHash20_t::ToFIPS () const
{
const char * sDigits = "0123456789abcdef";
char sHash [41];
StringBuilder_c sResult;
if ( Empty() )
return "";
for ( auto i = 0; i<HASH20_SIZE; ++i )
{
sHash[i << 1] = sDigits[m_dHashValue[i] >> 4];
sHash[1 + (i << 1)] = sDigits[m_dHashValue[i] & 0x0f];
}
sHash[40] = '\0';
// FIPS-180-1 - checksum, space, "*" (indicator of binary mode), tag
sResult.Appendf ("%s *%s\n", sHash, m_sTagName.cstr());
return sResult.cstr();
}
namespace {
inline BYTE hex_char ( unsigned char c ) noexcept
{
if ( c>=0x30 && c<=0x39 )
return c - '0';
if ( c>=0x61 && c<=0x66 )
return c - 'a' + 10;
assert ( false && "broken hex num - expected digits and a..f letters in the num" );
return 0;
}
}
// de-serialize from FIPS, returns len of parsed chunk of sFIPS or -1 on error
int TaggedHash20_t::FromFIPS ( const char * sFIPS )
{
// expects hash in form FIPS-180-1, that is:
// 45f44fd2db02b08b4189abf21e90edd712c9616d *rt_full.ram\n
	// i.e. a 40-digit lowercase hex hash, a space, '*', and the tag, terminated by '\n'
assert ( sFIPS[HASH20_SIZE * 2]==' ' && "broken FIPS - space expected after hash" );
assert ( sFIPS[HASH20_SIZE * 2 + 1]=='*' && "broken FIPS - * expected after hash and space" );
for ( auto i = 0; i<HASH20_SIZE; ++i )
{
BYTE & uCode = m_dHashValue[i];
uCode = hex_char ( sFIPS[i * 2] );
uCode = BYTE ( ( uCode << 4 ) + hex_char ( sFIPS[i * 2 + 1] ) );
}
sFIPS += 2 + HASH20_SIZE * 2;
auto len = (int) strlen ( sFIPS );
if ( sFIPS[len - 1]!='\n' )
return -1;
m_sTagName.SetBinary ( sFIPS, len - 1 );
return len;
}
bool TaggedHash20_t::operator== ( const BYTE * pRef ) const
{
assert ( pRef );
return !memcmp ( m_dHashValue.data(), pRef, HASH20_SIZE );
}
| 9,292 | C++ | .cpp | 308 | 27.896104 | 129 | 0.594398 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,918 | tracer.cpp | manticoresoftware_manticoresearch/src/tracer.cpp |
//
// Copyright (c) 2022-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org
//
#include "tracer.h"
#ifdef PERFETTO
#include "sphinxstd.h"
#include "fileio.h"
#include "fileutils.h"
#include "sphinxutils.h"
#include "perfetto.h"
class Trace2File_c
{
bool m_bActive = false;
CSphString m_sLastError;
CSphAutofile m_fdLog;
std::unique_ptr<perfetto::TracingSession> m_pTracing;
public:
Trace2File_c () = default;
~Trace2File_c()
{
Deinit();
}
const CSphString& GetLastError() const noexcept { return m_sLastError; }
bool Init ( const CSphString& sFilename, int64_t iMaxSize )
{
sphWarning ( "Called tracer::Init with '%s', size " INT64_FMT, sFilename.scstr(), iMaxSize );
if ( m_bActive )
Deinit();
if ( m_fdLog.Open ( sFilename.cstr(), SPH_O_NEW, m_sLastError ) >=0 )
{
// Event
perfetto::protos::gen::TrackEventConfig tTrackEventCfg;
tTrackEventCfg.add_disabled_categories ( "*" );
tTrackEventCfg.add_enabled_categories ( "*" );
// tTrackEventCfg.add_enabled_categories ( "rt" );
// tTrackEventCfg.add_enabled_categories ( "network" );
// tTrackEventCfg.add_enabled_categories ( "conn" );
// tTrackEventCfg.add_enabled_categories ( "wait" );
// tTrackEventCfg.add_enabled_categories ( "mem" );
// Trace
perfetto::TraceConfig tTraceCfg;
tTraceCfg.add_buffers()->set_size_kb ( iMaxSize ); // Record up to given limit
// Trace data source
auto* pDataSourceCfg = tTraceCfg.add_data_sources()->mutable_config();
pDataSourceCfg->set_name ( "track_event" );
pDataSourceCfg->set_track_event_config_raw ( tTrackEventCfg.SerializeAsString() );
// start tracing
m_pTracing = perfetto::Tracing::NewTrace();
m_pTracing->Setup ( tTraceCfg, m_fdLog.GetFD() );
m_pTracing->StartBlocking();
m_bActive = true;
}
return m_bActive;
}
bool Deinit()
{
sphWarning ( "Called tracer::Deinit()" );
if ( !m_bActive )
return false;
perfetto::TrackEvent::Flush();
m_pTracing->StopBlocking();
m_pTracing.reset();
CSphString sFile = m_fdLog.GetFilename();
m_fdLog.Close();
auto iWritten = sphGetFileSize ( sFile );
m_bActive = false;
sphWarning ( "tracer::Deinit(): " INT64_FMT " bytes written to '%s'", iWritten, sFile.cstr() );
return true;
}
};
PERFETTO_TRACK_EVENT_STATIC_STORAGE();
static Trace2File_c& GetTracer()
{
static Trace2File_c tTracer;
return tTracer;
}
void Tracer::Start ( const CSphString& sFile, int64_t iMaxSize )
{
GetTracer().Init ( sFile, iMaxSize );
}
void Tracer::Stop()
{
GetTracer().Deinit ();
}
void Tracer::Init()
{
perfetto::TracingInitArgs args;
args.backends |= perfetto::kInProcessBackend;
// args.backends |= perfetto::kSystemBackend;
perfetto::Tracing::Initialize ( args );
perfetto::TrackEvent::Register();
}
#endif
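// Usage sketch (illustrative; only effective in PERFETTO builds): the expected
// call order is Tracer::Init() once at startup, then Start()/Stop() around the
// interval of interest. The size argument is passed through to set_size_kb()
// above, so it is measured in kilobytes; the resulting file can be opened in
// the Perfetto UI.
#if 0
Tracer::Init();
Tracer::Start ( "/tmp/manticore.pftrace", 32768 ); // 32 MB trace buffer
// ... run the workload being traced ...
Tracer::Stop();
#endif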
| 3,160 | C++ | .cpp | 101 | 28.950495 | 97 | 0.717907 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,919 | accumulator.cpp | manticoresoftware_manticoresearch/src/accumulator.cpp |
//
// Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)
// Copyright (c) 2001-2016, Andrew Aksyonoff
// Copyright (c) 2008-2016, Sphinx Technologies Inc
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "accumulator.h"
#include "sphinxrt.h"
#include "columnarmisc.h"
#include "memio.h"
#include "tracer.h"
#include <memory>
std::unique_ptr<ReplicationCommand_t> MakeReplicationCommand ( ReplCmd_e eCommand, CSphString sIndex, CSphString sCluster )
{
auto pCmd = std::make_unique<ReplicationCommand_t>();
pCmd->m_eCommand = eCommand;
pCmd->m_sCluster = std::move ( sCluster );
pCmd->m_sIndex = std::move ( sIndex );
return pCmd;
}
ReplicationCommand_t* RtAccum_t::AddCommand ( ReplCmd_e eCmd, CSphString sIndex, CSphString sCluster )
{
	// all writes to an RT index go as a single command, to keep the accumulator serialized
if ( eCmd == ReplCmd_e::RT_TRX && !m_dCmd.IsEmpty() && m_dCmd.Last()->m_eCommand == ReplCmd_e::RT_TRX )
return m_dCmd.Last().get();
m_dCmd.Add ( MakeReplicationCommand ( eCmd, std::move ( sIndex ), std::move ( sCluster ) ) );
return m_dCmd.Last().get();
}
void RtAccum_t::SetupDict ( const RtIndex_i* pIndex, const DictRefPtr_c& pDict, bool bKeywordDict )
{
if ( pIndex == m_pIndex && pDict.Ptr() == m_pRefDict && bKeywordDict == m_bKeywordDict )
return;
m_bKeywordDict = bKeywordDict;
m_pRefDict = pDict.Ptr();
m_pDict = GetStatelessDict ( pDict );
if ( m_bKeywordDict )
{
m_pDict = m_pDictRt = sphCreateRtKeywordsDictionaryWrapper ( m_pDict, pIndex->NeedStoreWordID() );
}
}
void RtAccum_t::ResetDict()
{
assert ( !m_bKeywordDict || m_pDictRt );
if ( m_pDictRt )
m_pDictRt->ResetKeywords();
m_dPackedKeywords.Reset ( 0 );
}
const BYTE* RtAccum_t::GetPackedKeywords() const
{
return m_dPackedKeywords.IsEmpty() ? m_pDictRt->GetPackedKeywords() : m_dPackedKeywords.begin();
}
int RtAccum_t::GetPackedLen() const
{
return m_dPackedKeywords.IsEmpty() ? m_pDictRt->GetPackedLen() : m_dPackedKeywords.GetLength();
}
void RtAccum_t::Sort()
{
TRACE_CONN ( "conn", "RtAccum_t::Sort" );
if ( !m_bKeywordDict )
m_dAccum.Sort ( Lesser ( [] ( const CSphWordHit& a, const CSphWordHit& b )
{
return ( a.m_uWordID<b.m_uWordID ) ||
( a.m_uWordID==b.m_uWordID && a.m_tRowID<b.m_tRowID ) ||
( a.m_uWordID==b.m_uWordID && a.m_tRowID==b.m_tRowID && HITMAN::GetPosWithField ( a.m_uWordPos )<HITMAN::GetPosWithField ( b.m_uWordPos ) );
}));
else
{
assert ( m_pDictRt );
m_dAccum.Sort ( Lesser ( [pPackedKeywords = GetPackedKeywords()] ( const CSphWordHit& a, const CSphWordHit& b )
{
const BYTE* pPackedA = pPackedKeywords + a.m_uWordID;
const BYTE* pPackedB = pPackedKeywords + b.m_uWordID;
int iCmp = sphDictCmpStrictly ( (const char*)pPackedA + 1, *pPackedA, (const char*)pPackedB + 1, *pPackedB );
return ( iCmp < 0 ) || ( iCmp == 0 && a.m_tRowID < b.m_tRowID ) || ( iCmp == 0 && a.m_tRowID == b.m_tRowID && HITMAN::GetPosWithField ( a.m_uWordPos ) < HITMAN::GetPosWithField ( b.m_uWordPos ) );
}));
}
}
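// Design note (added commentary): both branches sort into the canonical hit
// order ( keyword, then rowid, then in-field position ) that segment building
// expects; the keyword-dict branch must compare the packed keyword strings
// themselves, because there m_uWordID is an offset into the packed-keywords
// pool rather than a directly comparable numeric id.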
void RtAccum_t::CleanupPart()
{
m_dAccumRows.Resize ( 0 );
m_dBlobs.Resize ( 0 );
m_pColumnarBuilder.reset();
m_dPerDocHitsCount.Resize ( 0 );
m_dAccum.Resize ( 0 );
m_pDocstore.reset();
ResetDict();
ResetRowID();
}
void RtAccum_t::Cleanup()
{
CleanupPart();
m_pIndex = nullptr;
m_pBlobWriter.reset();
m_uAccumDocs = 0;
m_iAccumBytes = 0;
m_dAccumKlist.Reset();
m_sIndexName = CSphString();
m_iIndexId = 0;
m_dCmd.Reset();
}
void RtAccum_t::SetupDocstore()
{
if ( m_pDocstore )
return;
m_pDocstore = CreateDocstoreRT();
assert ( m_pDocstore );
SetupDocstoreFields ( *m_pDocstore, m_pIndex->GetInternalSchema() );
}
bool RtAccum_t::SetupDocstore ( const RtIndex_i& tIndex, CSphString& sError )
{
const CSphSchema& tSchema = tIndex.GetInternalSchema();
if ( !m_pDocstore && !tSchema.HasStoredFields() && !tSchema.HasStoredAttrs() )
return true;
	// there might be a case when a replicated trx came without a docstore, but the index has one
if ( !m_pDocstore )
m_pDocstore = CreateDocstoreRT();
assert ( m_pDocstore );
SetupDocstoreFields ( *m_pDocstore, tSchema );
return m_pDocstore->CheckFieldsLoaded ( sError );
}
[[nodiscard]] bool RtAccum_t::IsClusterCommand() const noexcept
{
return ( m_dCmd.GetLength () && !m_dCmd[0]->m_sCluster.IsEmpty () );
}
[[nodiscard]] bool RtAccum_t::IsUpdateCommand() const noexcept
{
return ( m_dCmd.GetLength () &&
( m_dCmd[0]->m_eCommand==ReplCmd_e::UPDATE_API
|| m_dCmd[0]->m_eCommand==ReplCmd_e::UPDATE_QL
|| m_dCmd[0]->m_eCommand==ReplCmd_e::UPDATE_JSON ) );
}
static void ResetTailHit ( CSphWordHit * pHit )
{
if ( pHit->m_tRowID!=pHit[1].m_tRowID || pHit->m_uWordID!=pHit[1].m_uWordID )
return;
if ( HITMAN::GetField ( pHit->m_uWordPos )==HITMAN::GetField ( pHit[1].m_uWordPos ) && HITMAN::IsEnd ( pHit[1].m_uWordPos ) )
pHit->m_uWordPos = HITMAN::GetPosWithField ( pHit->m_uWordPos );
}
void RtAccum_t::AddDocument ( ISphHits* pHits, const InsertDocData_c& tDoc, bool bReplace, int iRowSize, const DocstoreBuilder_i::Doc_t* pStoredDoc )
{
MEMORY ( MEM_RT_ACCUM );
// FIXME? what happens on mixed insert/replace?
m_bReplace = bReplace;
DocID_t tDocID = tDoc.GetID();
// schedule existing copies for deletion
m_dAccumKlist.Add ( tDocID );
// reserve some hit space on first use
if ( pHits && pHits->GetLength() && !m_dAccum.GetLength() )
m_dAccum.Reserve ( 128 * 1024 );
// accumulate row data; expect fully dynamic rows
assert ( !tDoc.m_tDoc.m_pStatic );
assert ( !( !tDoc.m_tDoc.m_pDynamic && iRowSize != 0 ) );
assert ( !( tDoc.m_tDoc.m_pDynamic && (int)tDoc.m_tDoc.m_pDynamic[-1] != iRowSize ) );
CSphRowitem* pRow = nullptr;
if ( iRowSize )
{
m_dAccumRows.Append ( tDoc.m_tDoc.m_pDynamic, iRowSize );
pRow = &m_dAccumRows[m_dAccumRows.GetLength() - iRowSize];
}
CSphString sError;
int iStrAttr = 0;
int iBlobAttr = 0;
int iColumnarAttr = 0;
int iMva = 0;
CSphVector<int64_t> dTempKNN;
const char** ppStr = tDoc.m_dStrings.Begin();
const CSphSchema& tSchema = m_pIndex->GetInternalSchema();
for ( int i = 0; i < tSchema.GetAttrsCount(); ++i )
{
const CSphColumnInfo& tColumn = tSchema.GetAttr ( i );
switch ( tColumn.m_eAttrType )
{
case SPH_ATTR_STRING:
case SPH_ATTR_JSON:
{
const BYTE* pStr = ppStr ? (const BYTE*)ppStr[iStrAttr++] : nullptr;
ByteBlob_t dStr;
if ( tColumn.m_eAttrType == SPH_ATTR_STRING )
dStr = { pStr, pStr ? (int)strlen ( (const char*)pStr ) : 0 };
else // SPH_ATTR_JSON - packed len + data
dStr = sphUnpackPtrAttr ( pStr );
if ( tColumn.IsColumnar() )
m_pColumnarBuilder->SetAttr ( iColumnarAttr, dStr.first, dStr.second );
else
m_pBlobWriter->SetAttr ( iBlobAttr, dStr.first, dStr.second, sError );
}
break;
case SPH_ATTR_UINT32SET:
case SPH_ATTR_INT64SET:
case SPH_ATTR_FLOAT_VECTOR:
{
int iNumValues = 0;
bool bDefault = false;
const int64_t * pMva = tDoc.GetMVA(iMva);
std::tie ( iNumValues, bDefault ) = tDoc.ReadMVALength(pMva);
iMva += iNumValues + 1;
// fill default/missing float_vector+knn attributes with zeroes
if ( tColumn.m_eAttrType==SPH_ATTR_FLOAT_VECTOR && tColumn.IsIndexedKNN() && bDefault )
{
dTempKNN.Resize ( tColumn.m_tKNN.m_iDims );
dTempKNN.ZeroVec();
pMva = dTempKNN.Begin();
iNumValues = dTempKNN.GetLength();
}
if ( tColumn.IsColumnar() )
m_pColumnarBuilder->SetAttr ( iColumnarAttr, pMva, iNumValues );
else
m_pBlobWriter->SetAttr ( iBlobAttr, (const BYTE*)pMva, iNumValues * sizeof ( int64_t ), sError );
}
break;
default:
if ( tColumn.IsColumnar() )
m_pColumnarBuilder->SetAttr ( iColumnarAttr, tDoc.m_dColumnarAttrs[iColumnarAttr] );
break;
}
if ( tColumn.IsColumnar() )
++iColumnarAttr;
else if ( sphIsBlobAttr ( tColumn ) )
++iBlobAttr;
}
if ( m_pBlobWriter )
{
const CSphColumnInfo* pBlobLoc = tSchema.GetAttr ( sphGetBlobLocatorName() );
assert ( pBlobLoc );
sphSetRowAttr ( pRow, pBlobLoc->m_tLocator, m_pBlobWriter->Flush().first );
}
// handle index_field_lengths
DWORD* pFieldLens = nullptr;
if ( m_pIndex->GetSettings().m_bIndexFieldLens )
{
int iFirst = tSchema.GetAttrId_FirstFieldLen();
assert ( tSchema.GetAttr ( iFirst ).m_eAttrType == SPH_ATTR_TOKENCOUNT );
assert ( tSchema.GetAttr ( iFirst + tSchema.GetFieldsCount() - 1 ).m_eAttrType == SPH_ATTR_TOKENCOUNT );
pFieldLens = pRow + ( tSchema.GetAttr ( iFirst ).m_tLocator.m_iBitOffset / 32 );
memset ( pFieldLens, 0, sizeof ( int ) * tSchema.GetFieldsCount() ); // NOLINT
}
// accumulate hits
int iHits = 0;
if ( pHits && !pHits->IsEmpty() )
{
CSphWordHit tLastHit;
tLastHit.m_tRowID = INVALID_ROWID;
tLastHit.m_uWordID = 0;
tLastHit.m_uWordPos = 0;
Hitpos_t uFieldLastHit = pHits->Begin()->m_uWordPos;
DWORD uFieldLastCount = 1;
m_dAccum.ReserveGap ( pHits->GetLength() );
iHits = 0;
for ( CSphWordHit* pHit = pHits->Begin(); pHit < pHits->End(); ++pHit )
{
// ignore duplicate hits
if ( *pHit == tLastHit )
continue;
// update field lengths
if ( pFieldLens )
{
if ( HITMAN::GetField ( uFieldLastHit ) != HITMAN::GetField ( pHit->m_uWordPos ) )
{
pFieldLens[HITMAN::GetField ( uFieldLastHit )] += uFieldLastCount;
uFieldLastCount = 1;
uFieldLastHit = pHit->m_uWordPos;
}
// skip blended part, lemmas and duplicates
if ( HITMAN::GetPos ( pHit->m_uWordPos ) > HITMAN::GetPos ( uFieldLastHit ) )
{
uFieldLastHit = pHit->m_uWordPos;
uFieldLastCount++;
}
}
// need original hit for duplicate removal
tLastHit = *pHit;
		// reset the field-end flag for hits that are not the very last position in this field
if ( HITMAN::IsEnd ( pHit->m_uWordPos ) && pHit!=&pHits->Last() )
ResetTailHit ( pHit );
// accumulate
m_dAccum.Add ( *pHit );
++iHits;
}
if ( pFieldLens && uFieldLastCount )
{
pFieldLens[HITMAN::GetField ( uFieldLastHit )] += uFieldLastCount;
}
}
// make sure to get real count without duplicated hits
m_dPerDocHitsCount.Add ( iHits );
if ( pStoredDoc )
{
SetupDocstore();
m_pDocstore->AddDoc ( m_uAccumDocs, *pStoredDoc );
}
++m_uAccumDocs;
m_iAccumBytes += tDoc.m_iTotalBytes;
}
struct AccumDocHits_t
{
DocID_t m_tDocID;
int m_iDocIndex;
// int m_iHitIndex;
// int m_iHitCount;
};
void RtAccum_t::CleanupDuplicates ( int iRowSize )
{
TRACE_CONN ( "conn", "RtAccum_t::CleanupDuplicates" );
if ( m_uAccumDocs <= 1 )
return;
assert ( m_uAccumDocs == (DWORD)m_dPerDocHitsCount.GetLength() );
CSphVector<AccumDocHits_t> dDocHits ( m_dPerDocHitsCount.GetLength() );
assert ( m_pIndex );
const CSphSchema& tSchema = m_pIndex->GetInternalSchema();
bool bColumnarId = tSchema.GetAttr ( 0 ).IsColumnar();
{
// create temporary columnar accessor; don't take ownership of built attributes
auto pColumnar = CreateLightColumnarRT ( m_pIndex->GetInternalSchema(), m_pColumnarBuilder.get() );
std::string sError;
std::unique_ptr<columnar::Iterator_i> pColumnarIdIterator;
if ( bColumnarId )
{
pColumnarIdIterator = CreateColumnarIterator ( pColumnar.get(), sphGetDocidName(), sError );
assert ( pColumnarIdIterator );
}
// int iHitIndex = 0;
CSphRowitem* pRow = m_dAccumRows.Begin();
for ( DWORD i = 0; i < m_uAccumDocs; ++i, pRow += iRowSize )
{
AccumDocHits_t& tElem = dDocHits[i];
if ( !bColumnarId )
tElem.m_tDocID = sphGetDocID ( pRow );
else
tElem.m_tDocID = pColumnarIdIterator->Get(i);
tElem.m_iDocIndex = i;
// tElem.m_iHitIndex = iHitIndex;
// tElem.m_iHitCount = m_dPerDocHitsCount[i];
// iHitIndex += m_dPerDocHitsCount[i];
}
}
dDocHits.Sort ( Lesser ( [] ( const AccumDocHits_t& a, const AccumDocHits_t& b )
{
return ( a.m_tDocID < b.m_tDocID || ( a.m_tDocID == b.m_tDocID && a.m_iDocIndex < b.m_iDocIndex ) );
}));
DocID_t uPrev = 0;
if ( !dDocHits.any_of ( [&] ( const AccumDocHits_t& dDoc ) {
bool bRes = dDoc.m_tDocID == uPrev;
uPrev = dDoc.m_tDocID;
return bRes;
} ) )
return;
CSphFixedVector<RowID_t> dRowMap ( m_uAccumDocs );
for ( auto& i : dRowMap )
i = 0;
// identify duplicates to kill
if ( m_bReplace )
{
		// replace mode, last value wins, preceding values are duplicates
for ( DWORD i = 0; i < m_uAccumDocs - 1; ++i )
if ( dDocHits[i].m_tDocID == dDocHits[i + 1].m_tDocID )
dRowMap[dDocHits[i].m_iDocIndex] = INVALID_ROWID;
} else
{
// insert mode, first value wins, subsequent values are duplicates
for ( DWORD i = 1; i < m_uAccumDocs; ++i )
if ( dDocHits[i].m_tDocID == dDocHits[i - 1].m_tDocID )
dRowMap[dDocHits[i].m_iDocIndex] = INVALID_ROWID;
}
RowID_t tNextRowID = 0;
for ( auto& i : dRowMap )
if ( i != INVALID_ROWID )
i = tNextRowID++;
	// remove duplicate hits and compact hit rowids.
	// a document may have no hits at all, but hits of the documents after it
	// still need to be remapped / compacted; that is why the shortcut
	// "if ( tSrcRowID!=INVALID_ROWID ) -> if ( i!=iDstRow )" can not be used here
int iDstRow = 0;
for ( int i = 0, iLen = m_dAccum.GetLength(); i < iLen; ++i )
{
const auto& dSrcHit = m_dAccum[i];
RowID_t tSrcRowID = dRowMap[dSrcHit.m_tRowID];
if ( tSrcRowID != INVALID_ROWID )
{
CSphWordHit& tDstHit = m_dAccum[iDstRow];
tDstHit = dSrcHit;
tDstHit.m_tRowID = tSrcRowID;
++iDstRow;
}
}
m_dAccum.Resize ( iDstRow );
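// columnar attributes are stored outside m_dAccumRows, so they are compacted separately via the builder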
RemoveColumnarDuplicates ( m_pColumnarBuilder, dRowMap, tSchema );
iDstRow = 0;
ARRAY_FOREACH ( i, dRowMap )
{
if ( dRowMap[i] != INVALID_ROWID )
{
if ( i != iDstRow )
{
// remove duplicate docinfo
// but all attributes could be columnar
if ( iRowSize )
memcpy ( &m_dAccumRows[iDstRow * iRowSize], &m_dAccumRows[i * iRowSize], iRowSize * sizeof ( CSphRowitem ) );
// remove duplicate docstore
if ( m_pDocstore )
m_pDocstore->SwapRows ( iDstRow, i );
}
++iDstRow;
}
}
m_dAccumRows.Resize ( iDstRow * iRowSize );
m_uAccumDocs = iDstRow;
if ( m_pDocstore )
m_pDocstore->DropTail ( iDstRow );
}
void RtAccum_t::GrabLastWarning ( CSphString& sWarning )
{
if ( m_pDictRt && m_pDictRt->GetLastWarning() )
{
sWarning = m_pDictRt->GetLastWarning();
m_pDictRt->ResetWarning();
}
}
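// binds the accumulator to an index and (re)creates the per-schema
// blob and columnar builders used while accumulating rows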
void RtAccum_t::SetIndex ( RtIndex_i * pIndex )
{
assert ( pIndex );
m_iIndexGeneration = pIndex->GetAlterGeneration();
m_pIndex = pIndex;
m_pBlobWriter.reset();
m_sIndexName = pIndex->GetName();
m_iIndexId = pIndex->GetIndexId();
const CSphSchema& tSchema = pIndex->GetInternalSchema();
if ( tSchema.HasBlobAttrs() )
m_pBlobWriter = sphCreateBlobRowBuilder ( tSchema, m_dBlobs );
if ( !m_pColumnarBuilder )
m_pColumnarBuilder = CreateColumnarBuilderRT ( tSchema );
m_uSchemaHash = pIndex->GetSchemaHash();
}
RowID_t RtAccum_t::GenerateRowID()
{
return m_tNextRowID++;
}
void RtAccum_t::ResetRowID()
{
m_tNextRowID = 0;
}
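// deserializes an accumulator from a replicated transaction blob;
// the field order must mirror SaveRtTrx() below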
void RtAccum_t::LoadRtTrx ( ByteBlob_t tTrx, DWORD uVer )
{
MemoryReader_c tReader ( tTrx );
m_bReplace = !!tReader.GetVal<BYTE>();
tReader.GetVal ( m_uAccumDocs );
if ( uVer>=0x106 )
tReader.GetVal ( m_iAccumBytes );
// insert and replace
m_dAccum.Resize ( tReader.GetDword() );
for ( CSphWordHit& tHit : m_dAccum )
{
// such manual serialization is necessary because CSphWordHit is internally aligned by 8,
// and its size is 3*8, while the members actually occupy 4+8+4 bytes.
// Sending the raw uninitialized padding bytes is not ok, since it may affect CRC checking.
tReader.GetVal ( tHit.m_tRowID );
tReader.GetVal ( tHit.m_uWordID );
tReader.GetVal ( tHit.m_uWordPos );
}
GetArray ( m_dAccumRows, tReader );
GetArray ( m_dBlobs, tReader );
GetArray ( m_dPerDocHitsCount, tReader );
m_dPackedKeywords.Reset ( tReader.GetDword() );
tReader.GetBytes ( m_dPackedKeywords.Begin(), (int)m_dPackedKeywords.GetLengthBytes() );
if ( tReader.GetVal<BYTE>() )
{
if ( !m_pDocstore )
m_pDocstore = CreateDocstoreRT();
assert ( m_pDocstore );
m_pDocstore->Load ( tReader );
}
if ( tReader.GetVal<BYTE>() )
m_pColumnarBuilder = CreateColumnarBuilderRT ( tReader );
// delete
GetArray ( m_dAccumKlist, tReader );
}
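// serializes the accumulator for replication; keep the field order in sync with LoadRtTrx() above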
void RtAccum_t::SaveRtTrx ( MemoryWriter_c& tWriter ) const
{
tWriter.PutByte ( m_bReplace ); // needed only for data sorting on commit
tWriter.PutDword ( m_uAccumDocs );
tWriter.PutVal ( m_iAccumBytes );
// insert and replace
tWriter.PutDword ( m_dAccum.GetLength() );
for ( const CSphWordHit& tHit : m_dAccum )
{
tWriter.PutVal ( tHit.m_tRowID );
tWriter.PutVal ( tHit.m_uWordID );
tWriter.PutVal ( tHit.m_uWordPos );
}
SaveArray ( m_dAccumRows, tWriter );
SaveArray ( m_dBlobs, tWriter );
SaveArray ( m_dPerDocHitsCount, tWriter );
// packed keywords' default length is 1; no need to pass that
int iLen = ( m_bKeywordDict && m_pDictRt->GetPackedLen() > 1 ? (int)m_pDictRt->GetPackedLen() : 0 );
tWriter.PutDword ( iLen );
if ( iLen )
tWriter.PutBytes ( m_pDictRt->GetPackedKeywords(), iLen );
tWriter.PutByte ( m_pDocstore != nullptr );
if ( m_pDocstore )
m_pDocstore->Save ( tWriter );
tWriter.PutByte ( m_pColumnarBuilder != nullptr );
if ( m_pColumnarBuilder )
m_pColumnarBuilder->Save ( tWriter );
// delete
SaveArray ( m_dAccumKlist, tWriter );
}
| 17,415 | C++ | .cpp | 511 | 31.254403 | 199 | 0.69131 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
16,920 | sphinxversion.cpp | manticoresoftware_manticoresearch/src/sphinxversion.cpp |
//
// Copyright (c) 2019-2024, Manticore Software LTD (https://manticoresearch.com)
// All rights reserved
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License. You should have
// received a copy of the GPL license along with this program; if you
// did not, you can find it at http://www.gnu.org/
//
#include "gen_sphinxversion.h"
#ifndef BUILD_TAG
#define BANNER_TAG ""
#else
#define BANNER_TAG " " BUILD_TAG
#endif
#ifndef GIT_TIMESTAMP_ID
#define GIT_TIMESTAMP_ID "000101"
#endif
// this line is deprecated and no longer used; left here for a while.
// the numbers are now defined via sphinxversion.h
#ifndef VERNUMBERS
#define VERNUMBERS "7.7.7"
#endif
#define PRODUCT_VERSION VERNUMBERS " " SPH_GIT_COMMIT_ID "@" GIT_TIMESTAMP_ID BANNER_TAG
#define PRODUCT_NAME "Manticore " PRODUCT_VERSION
#define PRODUCT_BANNER_TEXT "\nCopyright (c) 2001-2016, Andrew Aksyonoff\n" \
"Copyright (c) 2008-2016, Sphinx Technologies Inc (http://sphinxsearch.com)\n" \
"Copyright (c) 2017-2024, Manticore Software LTD (https://manticoresearch.com)\n\n"
#define PRODUCT_BANNER PRODUCT_NAME PRODUCT_BANNER_TEXT
const char * szMANTICORE_VERSION = PRODUCT_VERSION;
const char * szMANTICORE_NAME = PRODUCT_NAME;
const char * szMANTICORE_BANNER = PRODUCT_BANNER;
const char * szMANTICORE_BANNER_TEXT = PRODUCT_BANNER_TEXT;
const char * szGIT_COMMIT_ID = SPH_GIT_COMMIT_ID;
const char * szGIT_BRANCH_ID = GIT_BRANCH_ID;
const char * szGDB_SOURCE_DIR = GDB_SOURCE_DIR;
| 1,560 | C++ | .cpp | 36 | 42.055556 | 97 | 0.752802 | manticoresoftware/manticoresearch | 8,893 | 493 | 500 | GPL-3.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |